[ { "title": "\"Hey, that\u2019s not an ODE\": Faster ODE Adjoints via Seminorms", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10271", "id": "10271", "proceeding": "http://proceedings.mlr.press/v139/kidger21a.html", "slides": "", "author_site": "Patrick Kidger, Ricky T. Q. Chen, Terry Lyons", "author": "Patrick Kidger; Ricky T. Q. Chen; Terry J Lyons", "abstract": "Neural differential equations may be trained by backpropagating gradients via the adjoint method, which is another differential equation typically solved using an adaptive-step-size numerical differential equation solver. A proposed step is accepted if its error, \\emph{relative to some norm}, is sufficiently small; else it is rejected, the step is shrunk, and the process is repeated. Here, we demonstrate that the particular structure of the adjoint equations makes the usual choices of norm (such as $L^2$) unnecessarily stringent. By replacing it with a more appropriate (semi)norm, fewer steps are unnecessarily rejected and the backpropagation is made faster. This requires only minor code modifications. Experiments on a wide range of tasks\u2014including time series, generative modeling, and physical control\u2014demonstrate a median improvement of 40% fewer function evaluations. On some problems we see as much as 62% fewer function evaluations, so that the overall training time is roughly halved.", "bibtex": "@InProceedings{pmlr-v139-kidger21a,\n title = \t {\"Hey, that\u2019s not an ODE\": Faster ODE Adjoints via Seminorms},\n author = {Kidger, Patrick and Chen, Ricky T. Q. and Lyons, Terry J},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5443--5452},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kidger21a/kidger21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kidger21a.html},\n abstract = \t {Neural differential equations may be trained by backpropagating gradients via the adjoint method, which is another differential equation typically solved using an adaptive-step-size numerical differential equation solver. A proposed step is accepted if its error, \\emph{relative to some norm}, is sufficiently small; else it is rejected, the step is shrunk, and the process is repeated. Here, we demonstrate that the particular structure of the adjoint equations makes the usual choices of norm (such as $L^2$) unnecessarily stringent. By replacing it with a more appropriate (semi)norm, fewer steps are unnecessarily rejected and the backpropagation is made faster. This requires only minor code modifications. Experiments on a wide range of tasks\u2014including time series, generative modeling, and physical control\u2014demonstrate a median improvement of 40% fewer function evaluations. 
On some problems we see as much as 62% fewer function evaluations, so that the overall training time is roughly halved.}\n}", "pdf": "http://proceedings.mlr.press/v139/kidger21a/kidger21a.pdf", "supp": "", "pdf_size": 794491, "gs_citation": 60, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8797786742526574165&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Mathematical Institute, University of Oxford, Oxford, United Kingdom+The Alan Turing Institute, The British Library, London, United Kingdom; University of Toronto+The Vector Institute; Mathematical Institute, University of Oxford, Oxford, United Kingdom+The Alan Turing Institute, The British Library, London, United Kingdom", "aff_domain": "maths.ox.ac.uk; ; ", "email": "maths.ox.ac.uk; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/kidger21a.html", "aff_unique_index": "0+1;2+3;0+1", "aff_unique_norm": "University of Oxford;Alan Turing Institute;University of Toronto;Vector Institute", "aff_unique_dep": "Mathematical Institute;;;", "aff_unique_url": "https://www.ox.ac.uk;https://www.turing.ac.uk;https://www.utoronto.ca;https://vectorinstitute.ai/", "aff_unique_abbr": "Oxford;ATI;U of T;Vector Institute", "aff_campus_unique_index": "0+1;;0+1", "aff_campus_unique": "Oxford;London;", "aff_country_unique_index": "0+0;1+1;0+0", "aff_country_unique": "United Kingdom;Canada" }, { "title": "1-bit Adam: Communication Efficient Large-Scale Training with Adam\u2019s Convergence Speed", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9809", "id": "9809", "proceeding": "http://proceedings.mlr.press/v139/tang21a.html", "slides": "/media/icml-2021/Slides/9809.pdf", "author_site": "Hanlin Tang, Shaoduo Gan, Ammar Ahmad Awan, Samyam Rajbhandari, Conglong Li, Xiangru Lian, Ji Liu, Ce Zhang, Yuxiong He", "author": "Hanlin Tang; Shaoduo Gan; Ammar Ahmad Awan; Samyam Rajbhandari; Conglong Li; Xiangru Lian; Ji Liu; Ce Zhang; Yuxiong He", "abstract": "Scalable training of large models (like BERT and GPT-3) requires careful optimization rooted in model design, architecture, and system capabilities. From a system standpoint, communication has become a major bottleneck, especially on commodity systems with standard TCP interconnects that offer limited network bandwidth. Communication compression is an important technique to reduce training time on such systems. One of the most effective ways to compress communication is via error compensation compression, which offers robust convergence speed, even under 1-bit compression. However, state-of-the-art error compensation techniques only work with basic optimizers like SGD and momentum SGD, which are linearly dependent on the gradients. They do not work with non-linear gradient-based optimizers like Adam, which offer state-of-the-art convergence efficiency and accuracy for models like BERT. In this paper, we propose 1-bit Adam that reduces the communication volume by up to 5x, offers much better scalability, and provides the same convergence speed as uncompressed Adam. Our key finding is that Adam\u2019s variance becomes stable (after a warmup phase) and can be used as a fixed precondition for the rest of the training (compression phase). We performed experiments on up to 256 GPUs and show that 1-bit Adam enables up to 3.3x higher throughput for BERT-Large pre-training and up to 2.9x higher throughput for SQuAD fine-tuning. 
In addition, we provide theoretical analysis for 1-bit Adam.", "bibtex": "@InProceedings{pmlr-v139-tang21a,\n title = \t {1-bit Adam: Communication Efficient Large-Scale Training with Adam\u2019s Convergence Speed},\n author = {Tang, Hanlin and Gan, Shaoduo and Awan, Ammar Ahmad and Rajbhandari, Samyam and Li, Conglong and Lian, Xiangru and Liu, Ji and Zhang, Ce and He, Yuxiong},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10118--10129},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tang21a/tang21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/tang21a.html},\n abstract = \t {Scalable training of large models (like BERT and GPT-3) requires careful optimization rooted in model design, architecture, and system capabilities. From a system standpoint, communication has become a major bottleneck, especially on commodity systems with standard TCP interconnects that offer limited network bandwidth. Communication compression is an important technique to reduce training time on such systems. One of the most effective ways to compress communication is via error compensation compression, which offers robust convergence speed, even under 1-bit compression. However, state-of-the-art error compensation techniques only work with basic optimizers like SGD and momentum SGD, which are linearly dependent on the gradients. They do not work with non-linear gradient-based optimizers like Adam, which offer state-of-the-art convergence efficiency and accuracy for models like BERT. In this paper, we propose 1-bit Adam that reduces the communication volume by up to 5x, offers much better scalability, and provides the same convergence speed as uncompressed Adam. Our key finding is that Adam\u2019s variance becomes stable (after a warmup phase) and can be used as a fixed precondition for the rest of the training (compression phase). We performed experiments on up to 256 GPUs and show that 1-bit Adam enables up to 3.3x higher throughput for BERT-Large pre-training and up to 2.9x higher throughput for SQuAD fine-tuning. 
In addition, we provide theoretical analysis for 1-bit Adam.}\n}", "pdf": "http://proceedings.mlr.press/v139/tang21a/tang21a.pdf", "supp": "", "pdf_size": 3029793, "gs_citation": 100, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=774430371966980192&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Microsoft; Department of Computer Science, University of Rochester; Department of Computer Science, ETH Zurich; Microsoft; Microsoft; Department of Computer Science, University of Rochester; Department of Computer Science, University of Rochester; Department of Computer Science, ETH Zurich; Microsoft", "aff_domain": "microsoft.com; ; ; ; ; ; ; ;microsoft.com", "email": "microsoft.com; ; ; ; ; ; ; ;microsoft.com", "github": "", "project": "", "author_num": 9, "oa": "https://proceedings.mlr.press/v139/tang21a.html", "aff_unique_index": "0;1;2;0;0;1;1;2;0", "aff_unique_norm": "Microsoft;University of Rochester;ETH Zurich", "aff_unique_dep": "Microsoft Corporation;Department of Computer Science;Department of Computer Science", "aff_unique_url": "https://www.microsoft.com;https://www.rochester.edu;https://www.ethz.ch", "aff_unique_abbr": "Microsoft;U of R;ETHZ", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;1;0;0;0;0;1;0", "aff_country_unique": "United States;Switzerland" }, { "title": "12-Lead ECG Reconstruction via Koopman Operators", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10177", "id": "10177", "proceeding": "http://proceedings.mlr.press/v139/golany21a.html", "slides": "", "author_site": "Tomer Golany, Kira Radinsky, Daniel Freedman, Saar Minha", "author": "Tomer Golany; Kira Radinsky; Daniel Freedman; Saar Minha", "abstract": "32% of all global deaths in the world are caused by cardiovascular diseases. Early detection, especially for patients with ischemia or cardiac arrhythmia, is crucial. To reduce the time between symptoms onset and treatment, wearable ECG sensors were developed to allow for the recording of the full 12-lead ECG signal at home. However, if even a single lead is not correctly positioned on the body that lead becomes corrupted, making automatic diagnosis on the basis of the full signal impossible. In this work, we present a methodology to reconstruct missing or noisy leads using the theory of Koopman Operators. Given a dataset consisting of full 12-lead ECGs, we learn a dynamical system describing the evolution of the 12 individual signals together in time. The Koopman theory indicates that there exists a high-dimensional embedding space in which the operator which propagates from one time instant to the next is linear. We therefore learn both the mapping to this embedding space, as well as the corresponding linear operator. Armed with this representation, we are able to impute missing leads by solving a least squares system in the embedding space, which can be achieved efficiently due to the sparse structure of the system. 
We perform an empirical evaluation using 12-lead ECG signals from thousands of patients, and show that we are able to reconstruct the signals in such way that enables accurate clinical diagnosis.", "bibtex": "@InProceedings{pmlr-v139-golany21a,\n title = \t {12-Lead ECG Reconstruction via Koopman Operators},\n author = {Golany, Tomer and Radinsky, Kira and Freedman, Daniel and Minha, Saar},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3745--3754},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/golany21a/golany21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/golany21a.html},\n abstract = \t {32% of all global deaths in the world are caused by cardiovascular diseases. Early detection, especially for patients with ischemia or cardiac arrhythmia, is crucial. To reduce the time between symptoms onset and treatment, wearable ECG sensors were developed to allow for the recording of the full 12-lead ECG signal at home. However, if even a single lead is not correctly positioned on the body that lead becomes corrupted, making automatic diagnosis on the basis of the full signal impossible. In this work, we present a methodology to reconstruct missing or noisy leads using the theory of Koopman Operators. Given a dataset consisting of full 12-lead ECGs, we learn a dynamical system describing the evolution of the 12 individual signals together in time. The Koopman theory indicates that there exists a high-dimensional embedding space in which the operator which propagates from one time instant to the next is linear. We therefore learn both the mapping to this embedding space, as well as the corresponding linear operator. Armed with this representation, we are able to impute missing leads by solving a least squares system in the embedding space, which can be achieved efficiently due to the sparse structure of the system. 
We perform an empirical evaluation using 12-lead ECG signals from thousands of patients, and show that we are able to reconstruct the signals in such way that enables accurate clinical diagnosis.}\n}", "pdf": "http://proceedings.mlr.press/v139/golany21a/golany21a.pdf", "supp": "", "pdf_size": 1452907, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5438335560248939200&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Technion - Israel Institute of Technology, Haifa, Israel; Google Research; Shamir Medical Center, Zerifin, Israel + Sackler School of Medicine, Tel-Aviv University, Israel; Technion - Israel Institute of Technology, Haifa, Israel", "aff_domain": "cs.technion.ac.il;google.com;gmail.com;cs.technion.ac.il", "email": "cs.technion.ac.il;google.com;gmail.com;cs.technion.ac.il", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/golany21a.html", "aff_unique_index": "0;1;2+3;0", "aff_unique_norm": "Technion - Israel Institute of Technology;Google;Shamir Medical Center;Tel-Aviv University", "aff_unique_dep": ";Google Research;;Sackler School of Medicine", "aff_unique_url": "https://www.technion.ac.il;https://research.google;;https://www.tau.ac.il", "aff_unique_abbr": "Technion;Google Research;;TAU", "aff_campus_unique_index": "0;1;2;0", "aff_campus_unique": "Haifa;Mountain View;Zerifin;", "aff_country_unique_index": "0;1;0+0;0", "aff_country_unique": "Israel;United States" }, { "title": "A Bit More Bayesian: Domain-Invariant Learning with Uncertainty", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10665", "id": "10665", "proceeding": "http://proceedings.mlr.press/v139/xiao21a.html", "slides": "", "author_site": "Zehao Xiao, Jiayi Shen, Xiantong Zhen, Ling Shao, Cees Snoek", "author": "Zehao Xiao; Jiayi Shen; Xiantong Zhen; Ling Shao; Cees Snoek", "abstract": "Domain generalization is challenging due to the domain shift and the uncertainty caused by the inaccessibility of target domain data. In this paper, we address both challenges with a probabilistic framework based on variational Bayesian inference, by incorporating uncertainty into neural network weights. We couple domain invariance in a probabilistic formula with the variational Bayesian inference. This enables us to explore domain-invariant learning in a principled way. Specifically, we derive domain-invariant representations and classifiers, which are jointly established in a two-layer Bayesian neural network. We empirically demonstrate the effectiveness of our proposal on four widely used cross-domain visual recognition benchmarks. Ablation studies validate the synergistic benefits of our Bayesian treatment when jointly learning domain-invariant representations and classifiers for domain generalization. 
Further, our method consistently delivers state-of-the-art mean accuracy on all benchmarks.", "bibtex": "@InProceedings{pmlr-v139-xiao21a,\n title = \t {A Bit More Bayesian: Domain-Invariant Learning with Uncertainty},\n author = {Xiao, Zehao and Shen, Jiayi and Zhen, Xiantong and Shao, Ling and Snoek, Cees},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11351--11361},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xiao21a/xiao21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/xiao21a.html},\n abstract = \t {Domain generalization is challenging due to the domain shift and the uncertainty caused by the inaccessibility of target domain data. In this paper, we address both challenges with a probabilistic framework based on variational Bayesian inference, by incorporating uncertainty into neural network weights. We couple domain invariance in a probabilistic formula with the variational Bayesian inference. This enables us to explore domain-invariant learning in a principled way. Specifically, we derive domain-invariant representations and classifiers, which are jointly established in a two-layer Bayesian neural network. We empirically demonstrate the effectiveness of our proposal on four widely used cross-domain visual recognition benchmarks. Ablation studies validate the synergistic benefits of our Bayesian treatment when jointly learning domain-invariant representations and classifiers for domain generalization. Further, our method consistently delivers state-of-the-art mean accuracy on all benchmarks.}\n}", "pdf": "http://proceedings.mlr.press/v139/xiao21a/xiao21a.pdf", "supp": "", "pdf_size": 889418, "gs_citation": 47, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8533759072554466832&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "AIM Lab, University of Amsterdam, The Netherlands; AIM Lab, University of Amsterdam, The Netherlands; AIM Lab, University of Amsterdam, The Netherlands+Inception Institute of Arti\ufb01cial Intelligence, UAE; Inception Institute of Arti\ufb01cial Intelligence, UAE; AIM Lab, University of Amsterdam, The Netherlands", "aff_domain": "uva.nl;uva.nl;uva.nl;inceptioniai.org;uva.nl", "email": "uva.nl;uva.nl;uva.nl;inceptioniai.org;uva.nl", "github": "https://github.com/zzzx1224/A-Bit-More-Bayesian.git", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/xiao21a.html", "aff_unique_index": "0;0;0+1;1;0", "aff_unique_norm": "University of Amsterdam;Inception Institute of Artificial Intelligence", "aff_unique_dep": "AIM Lab;", "aff_unique_url": "https://www.uva.nl;https://www.inceptioniai.org", "aff_unique_abbr": "UvA;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0+1;1;0", "aff_country_unique": "Netherlands;United Arab Emirates" }, { "title": "A Collective Learning Framework to Boost GNN Expressiveness for Node Classification", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9337", "id": "9337", "proceeding": "http://proceedings.mlr.press/v139/hang21a.html", "slides": "", "author_site": "Mengyue Hang, Jennifer Neville, Bruno Ribeiro", "author": "Mengyue Hang; Jennifer Neville; Bruno Ribeiro", "abstract": "Collective Inference (CI) is a procedure designed to boost weak relational 
classifiers, specially for node classification tasks. Graph Neural Networks (GNNs) are strong classifiers that have been used with great success. Unfortunately, most existing practical GNNs are not most-expressive (universal). Thus, it is an open question whether one can improve strong relational node classifiers, such as GNNs, with CI. In this work, we investigate this question and propose {\\em collective learning} for GNNs \u2014a general collective classification approach for node representation learning that increases their representation power. We show that previous attempts to incorporate CI into GNNs fail to boost their expressiveness because they do not adapt CI\u2019s Monte Carlo sampling to representation learning. We evaluate our proposed framework with a variety of state-of-the-art GNNs. Our experiments show a consistent, significant boost in node classification accuracy \u2014regardless of the choice of underlying GNN\u2014 for inductive node classification in partially-labeled graphs, across five real-world network datasets.", "bibtex": "@InProceedings{pmlr-v139-hang21a,\n title = \t {A Collective Learning Framework to Boost GNN Expressiveness for Node Classification},\n author = {Hang, Mengyue and Neville, Jennifer and Ribeiro, Bruno},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4040--4050},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hang21a/hang21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hang21a.html},\n abstract = \t {Collective Inference (CI) is a procedure designed to boost weak relational classifiers, specially for node classification tasks. Graph Neural Networks (GNNs) are strong classifiers that have been used with great success. Unfortunately, most existing practical GNNs are not most-expressive (universal). Thus, it is an open question whether one can improve strong relational node classifiers, such as GNNs, with CI. In this work, we investigate this question and propose {\\em collective learning} for GNNs \u2014a general collective classification approach for node representation learning that increases their representation power. We show that previous attempts to incorporate CI into GNNs fail to boost their expressiveness because they do not adapt CI\u2019s Monte Carlo sampling to representation learning. We evaluate our proposed framework with a variety of state-of-the-art GNNs. 
Our experiments show a consistent, significant boost in node classification accuracy \u2014regardless of the choice of underlying GNN\u2014 for inductive node classification in partially-labeled graphs, across five real-world network datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/hang21a/hang21a.pdf", "supp": "", "pdf_size": 521172, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10207534505024667789&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, Purdue University, West Lafayette, Indiana, USA; Department of Computer Science, Purdue University, West Lafayette, Indiana, USA; Department of Computer Science, Purdue University, West Lafayette, Indiana, USA", "aff_domain": "purdue.edu; ; ", "email": "purdue.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/hang21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Purdue University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.purdue.edu", "aff_unique_abbr": "Purdue", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "West Lafayette", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "A Deep Reinforcement Learning Approach to Marginalized Importance Sampling with the Successor Representation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9999", "id": "9999", "proceeding": "http://proceedings.mlr.press/v139/fujimoto21a.html", "slides": "", "author_site": "Scott Fujimoto, David Meger, Doina Precup", "author": "Scott Fujimoto; David Meger; Doina Precup", "abstract": "Marginalized importance sampling (MIS), which measures the density ratio between the state-action occupancy of a target policy and that of a sampling distribution, is a promising approach for off-policy evaluation. However, current state-of-the-art MIS methods rely on complex optimization tricks and succeed mostly on simple toy problems. We bridge the gap between MIS and deep reinforcement learning by observing that the density ratio can be computed from the successor representation of the target policy. The successor representation can be trained through deep reinforcement learning methodology and decouples the reward optimization from the dynamics of the environment, making the resulting algorithm stable and applicable to high-dimensional domains. We evaluate the empirical performance of our approach on a variety of challenging Atari and MuJoCo environments.", "bibtex": "@InProceedings{pmlr-v139-fujimoto21a,\n title = \t {A Deep Reinforcement Learning Approach to Marginalized Importance Sampling with the Successor Representation},\n author = {Fujimoto, Scott and Meger, David and Precup, Doina},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3518--3529},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fujimoto21a/fujimoto21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/fujimoto21a.html},\n abstract = \t {Marginalized importance sampling (MIS), which measures the density ratio between the state-action occupancy of a target policy and that of a sampling distribution, is a promising approach for off-policy evaluation. 
However, current state-of-the-art MIS methods rely on complex optimization tricks and succeed mostly on simple toy problems. We bridge the gap between MIS and deep reinforcement learning by observing that the density ratio can be computed from the successor representation of the target policy. The successor representation can be trained through deep reinforcement learning methodology and decouples the reward optimization from the dynamics of the environment, making the resulting algorithm stable and applicable to high-dimensional domains. We evaluate the empirical performance of our approach on a variety of challenging Atari and MuJoCo environments.}\n}", "pdf": "http://proceedings.mlr.press/v139/fujimoto21a/fujimoto21a.pdf", "supp": "", "pdf_size": 5164677, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2623436752996151694&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Mila, McGill University; Mila, McGill University; Mila, McGill University", "aff_domain": "mail.mcgill.ca; ; ", "email": "mail.mcgill.ca; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/fujimoto21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "McGill University", "aff_unique_dep": "Mila", "aff_unique_url": "https://www.mcgill.ca", "aff_unique_abbr": "McGill", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Montreal", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Canada" }, { "title": "A Differentiable Point Process with Its Application to Spiking Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10651", "id": "10651", "proceeding": "http://proceedings.mlr.press/v139/kajino21a.html", "slides": "/media/icml-2021/Slides/10651.pdf", "author": "Hiroshi Kajino", "abstract": "This paper is concerned about a learning algorithm for a probabilistic model of spiking neural networks (SNNs). Jimenez Rezende & Gerstner (2014) proposed a stochastic variational inference algorithm to train SNNs with hidden neurons. The algorithm updates the variational distribution using the score function gradient estimator, whose high variance often impedes the whole learning algorithm. This paper presents an alternative gradient estimator for SNNs based on the path-wise gradient estimator. The main technical difficulty is a lack of a general method to differentiate a realization of an arbitrary point process, which is necessary to derive the path-wise gradient estimator. We develop a differentiable point process, which is the technical highlight of this paper, and apply it to derive the path-wise gradient estimator for SNNs. We investigate the effectiveness of our gradient estimator through numerical simulation.", "bibtex": "@InProceedings{pmlr-v139-kajino21a,\n title = \t {A Differentiable Point Process with Its Application to Spiking Neural Networks},\n author = {Kajino, Hiroshi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5226--5235},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kajino21a/kajino21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kajino21a.html},\n abstract = \t {This paper is concerned about a learning algorithm for a probabilistic model of spiking neural networks (SNNs). 
Jimenez Rezende & Gerstner (2014) proposed a stochastic variational inference algorithm to train SNNs with hidden neurons. The algorithm updates the variational distribution using the score function gradient estimator, whose high variance often impedes the whole learning algorithm. This paper presents an alternative gradient estimator for SNNs based on the path-wise gradient estimator. The main technical difficulty is a lack of a general method to differentiate a realization of an arbitrary point process, which is necessary to derive the path-wise gradient estimator. We develop a differentiable point process, which is the technical highlight of this paper, and apply it to derive the path-wise gradient estimator for SNNs. We investigate the effectiveness of our gradient estimator through numerical simulation.}\n}", "pdf": "http://proceedings.mlr.press/v139/kajino21a/kajino21a.pdf", "supp": "", "pdf_size": 497319, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18295729593563933234&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "IBM Research - Tokyo, Tokyo, Japan", "aff_domain": "jp.ibm.com", "email": "jp.ibm.com", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v139/kajino21a.html", "aff_unique_index": "0", "aff_unique_norm": "IBM", "aff_unique_dep": "Research", "aff_unique_url": "https://www.ibm.com/research", "aff_unique_abbr": "IBM", "aff_campus_unique_index": "0", "aff_campus_unique": "Tokyo", "aff_country_unique_index": "0", "aff_country_unique": "Japan" }, { "title": "A Discriminative Technique for Multiple-Source Adaptation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10189", "id": "10189", "proceeding": "http://proceedings.mlr.press/v139/cortes21b.html", "slides": "", "author_site": "Corinna Cortes, Mehryar Mohri, Ananda Theertha Suresh, Ningshan Zhang", "author": "Corinna Cortes; Mehryar Mohri; Ananda Theertha Suresh; Ningshan Zhang", "abstract": "We present a new discriminative technique for the multiple-source adaptation (MSA) problem. Unlike previous work, which relies on density estimation for each source domain, our solution only requires conditional probabilities that can be straightforwardly accurately estimated from unlabeled data from the source domains. We give a detailed analysis of our new technique, including general guarantees based on R\u00e9nyi divergences, and learning bounds when conditional Maxent is used for estimating conditional probabilities for a point to belong to a source domain. We show that these guarantees compare favorably to those that can be derived for the generative solution, using kernel density estimation. 
Our experiments with real-world applications further demonstrate that our new discriminative MSA algorithm outperforms the previous generative solution as well as other domain adaptation baselines.", "bibtex": "@InProceedings{pmlr-v139-cortes21b,\n title = \t {A Discriminative Technique for Multiple-Source Adaptation},\n author = {Cortes, Corinna and Mohri, Mehryar and Suresh, Ananda Theertha and Zhang, Ningshan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2132--2143},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cortes21b/cortes21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/cortes21b.html},\n abstract = \t {We present a new discriminative technique for the multiple-source adaptation (MSA) problem. Unlike previous work, which relies on density estimation for each source domain, our solution only requires conditional probabilities that can be straightforwardly accurately estimated from unlabeled data from the source domains. We give a detailed analysis of our new technique, including general guarantees based on R\u00e9nyi divergences, and learning bounds when conditional Maxent is used for estimating conditional probabilities for a point to belong to a source domain. We show that these guarantees compare favorably to those that can be derived for the generative solution, using kernel density estimation. Our experiments with real-world applications further demonstrate that our new discriminative MSA algorithm outperforms the previous generative solution as well as other domain adaptation baselines.}\n}", "pdf": "http://proceedings.mlr.press/v139/cortes21b/cortes21b.pdf", "supp": "", "pdf_size": 3114649, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=485714480848548825&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Google Research, New York, NY; Courant Institute of Mathematical Sciences, New York, NY; Google Research, New York, NY; Hudson River Trading, New York, NY", "aff_domain": "google.com;cs.nyu.edu;google.com;hudson-trading.com", "email": "google.com;cs.nyu.edu;google.com;hudson-trading.com", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/cortes21b.html", "aff_unique_index": "0;1;0;2", "aff_unique_norm": "Google;Courant Institute of Mathematical Sciences;Hudson River Trading", "aff_unique_dep": "Google Research;Mathematical Sciences;", "aff_unique_url": "https://research.google;https://courant.nyu.edu;https://www.hudsonrivertarding.com", "aff_unique_abbr": "Google Research;Courant;HRT", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "New York;", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "A Distribution-dependent Analysis of Meta Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10047", "id": "10047", "proceeding": "http://proceedings.mlr.press/v139/konobeev21a.html", "slides": "/media/icml-2021/Slides/10047.pdf", "author_site": "Mikhail Konobeev, Ilja Kuzborskij, Csaba Szepesvari", "author": "Mikhail Konobeev; Ilja Kuzborskij; Csaba Szepesvari", "abstract": "A key problem in the theory of meta-learning is to understand how the task distributions influence transfer risk, the expected error of a meta-learner on a new task drawn from 
the unknown task distribution. In this paper, focusing on fixed design linear regression with Gaussian noise and a Gaussian task (or parameter) distribution, we give distribution-dependent lower bounds on the transfer risk of any algorithm, while we also show that a novel, weighted version of the so-called biased regularized regression method is able to match these lower bounds up to a fixed constant factor. Notably, the weighting is derived from the covariance of the Gaussian task distribution. Altogether, our results provide a precise characterization of the difficulty of meta-learning in this Gaussian setting. While this problem setting may appear simple, we show that it is rich enough to unify the \u201cparameter sharing\u201d and \u201crepresentation learning\u201d streams of meta-learning; in particular, representation learning is obtained as the special case when the covariance matrix of the task distribution is unknown. For this case we propose to adopt the EM method, which is shown to enjoy efficient updates in our case. The paper is completed by an empirical study of EM. In particular, our experimental results show that the EM algorithm can attain the lower bound as the number of tasks grows, while the algorithm is also successful in competing with its alternatives when used in a representation learning context.", "bibtex": "@InProceedings{pmlr-v139-konobeev21a,\n title = \t {A Distribution-dependent Analysis of Meta Learning},\n author = {Konobeev, Mikhail and Kuzborskij, Ilja and Szepesvari, Csaba},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5697--5706},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/konobeev21a/konobeev21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/konobeev21a.html},\n abstract = \t {A key problem in the theory of meta-learning is to understand how the task distributions influence transfer risk, the expected error of a meta-learner on a new task drawn from the unknown task distribution. In this paper, focusing on fixed design linear regression with Gaussian noise and a Gaussian task (or parameter) distribution, we give distribution-dependent lower bounds on the transfer risk of any algorithm, while we also show that a novel, weighted version of the so-called biased regularized regression method is able to match these lower bounds up to a fixed constant factor. Notably, the weighting is derived from the covariance of the Gaussian task distribution. Altogether, our results provide a precise characterization of the difficulty of meta-learning in this Gaussian setting. While this problem setting may appear simple, we show that it is rich enough to unify the \u201cparameter sharing\u201d and \u201crepresentation learning\u201d streams of meta-learning; in particular, representation learning is obtained as the special case when the covariance matrix of the task distribution is unknown. For this case we propose to adopt the EM method, which is shown to enjoy efficient updates in our case. The paper is completed by an empirical study of EM. 
In particular, our experimental results show that the EM algorithm can attain the lower bound as the number of tasks grows, while the algorithm is also successful in competing with its alternatives when used in a representation learning context.}\n}", "pdf": "http://proceedings.mlr.press/v139/konobeev21a/konobeev21a.pdf", "supp": "", "pdf_size": 690156, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=130478103891519086&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Computing Science Department, University of Alberta, Edmonton, Alberta, Canada+DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; Computing Science Department, University of Alberta, Edmonton, Alberta, Canada+DeepMind, London, United Kingdom", "aff_domain": "gmail.com; ; ", "email": "gmail.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/konobeev21a.html", "aff_unique_index": "0+1;1;0+1", "aff_unique_norm": "University of Alberta;DeepMind", "aff_unique_dep": "Computing Science Department;", "aff_unique_url": "https://www.ualberta.ca;https://deepmind.com", "aff_unique_abbr": "UAlberta;DeepMind", "aff_campus_unique_index": "0+1;1;0+1", "aff_campus_unique": "Edmonton;London", "aff_country_unique_index": "0+1;1;0+1", "aff_country_unique": "Canada;United Kingdom" }, { "title": "A Framework for Private Matrix Analysis in Sliding Window Model", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8999", "id": "8999", "proceeding": "http://proceedings.mlr.press/v139/upadhyay21a.html", "slides": "", "author_site": "Jalaj Upadhyay, Sarvagya Upadhyay", "author": "Jalaj Upadhyay; Sarvagya Upadhyay", "abstract": "We perform a rigorous study of private matrix analysis when only the last $W$ updates to matrices are considered useful for analysis. We show the existing framework in the non-private setting is not robust to noise required for privacy. We then propose a framework robust to noise and use it to give first efficient $o(W)$ space differentially private algorithms for spectral approximation, principal component analysis (PCA), multi-response linear regression, sparse PCA, and non-negative PCA. Prior to our work, no such result was known for sparse and non-negative differentially private PCA even in the static data setting. We also give a lower bound to demonstrate the cost of privacy in the sliding window model.", "bibtex": "@InProceedings{pmlr-v139-upadhyay21a,\n title = \t {A Framework for Private Matrix Analysis in Sliding Window Model},\n author = {Upadhyay, Jalaj and Upadhyay, Sarvagya},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10465--10475},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/upadhyay21a/upadhyay21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/upadhyay21a.html},\n abstract = \t {We perform a rigorous study of private matrix analysis when only the last $W$ updates to matrices are considered useful for analysis. We show the existing framework in the non-private setting is not robust to noise required for privacy. 
We then propose a framework robust to noise and use it to give first efficient $o(W)$ space differentially private algorithms for spectral approximation, principal component analysis (PCA), multi-response linear regression, sparse PCA, and non-negative PCA. Prior to our work, no such result was known for sparse and non-negative differentially private PCA even in the static data setting. We also give a lower bound to demonstrate the cost of privacy in the sliding window model.}\n}", "pdf": "http://proceedings.mlr.press/v139/upadhyay21a/upadhyay21a.pdf", "supp": "", "pdf_size": 780245, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=869664023679897417&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Apple, USA; Fujitsu Research of America, USA", "aff_domain": "apple.com; ", "email": "apple.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/upadhyay21a.html", "aff_unique_index": "0;1", "aff_unique_norm": "Apple;Fujitsu Research of America", "aff_unique_dep": "Apple Inc.;", "aff_unique_url": "https://www.apple.com;https://www.fujitsu.com/us/", "aff_unique_abbr": "Apple;FRA", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "A Free Lunch From ANN: Towards Efficient, Accurate Spiking Neural Networks Calibration", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9041", "id": "9041", "proceeding": "http://proceedings.mlr.press/v139/li21d.html", "slides": "", "author_site": "Yuhang Li, Shikuang Deng, Xin Dong, Ruihao Gong, Shi Gu", "author": "Yuhang Li; Shikuang Deng; Xin Dong; Ruihao Gong; Shi Gu", "abstract": "Spiking Neural Network (SNN) has been recognized as one of the next generation of neural networks. Conventionally, SNN can be converted from a pre-trained ANN by only replacing the ReLU activation to spike activation while keeping the parameters intact. Perhaps surprisingly, in this work we show that a proper way to calibrate the parameters during the conversion of ANN to SNN can bring significant improvements. We introduce SNN Calibration, a cheap but extraordinarily effective method by leveraging the knowledge within a pre-trained Artificial Neural Network (ANN). Starting by analyzing the conversion error and its propagation through layers theoretically, we propose the calibration algorithm that can correct the error layer-by-layer. The calibration only takes a handful number of training data and several minutes to finish. Moreover, our calibration algorithm can produce SNN with state-of-the-art architecture on the large-scale ImageNet dataset, including MobileNet and RegNet. Extensive experiments demonstrate the effectiveness and efficiency of our algorithm. For example, our advanced pipeline can increase up to 69% top-1 accuracy when converting MobileNet on ImageNet compared to baselines. 
Codes are released at https://github.com/yhhhli/SNN_Calibration.", "bibtex": "@InProceedings{pmlr-v139-li21d,\n title = \t {A Free Lunch From ANN: Towards Efficient, Accurate Spiking Neural Networks Calibration},\n author = {Li, Yuhang and Deng, Shikuang and Dong, Xin and Gong, Ruihao and Gu, Shi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6316--6325},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21d/li21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21d.html},\n abstract = \t {Spiking Neural Network (SNN) has been recognized as one of the next generation of neural networks. Conventionally, SNN can be converted from a pre-trained ANN by only replacing the ReLU activation to spike activation while keeping the parameters intact. Perhaps surprisingly, in this work we show that a proper way to calibrate the parameters during the conversion of ANN to SNN can bring significant improvements. We introduce SNN Calibration, a cheap but extraordinarily effective method by leveraging the knowledge within a pre-trained Artificial Neural Network (ANN). Starting by analyzing the conversion error and its propagation through layers theoretically, we propose the calibration algorithm that can correct the error layer-by-layer. The calibration only takes a handful number of training data and several minutes to finish. Moreover, our calibration algorithm can produce SNN with state-of-the-art architecture on the large-scale ImageNet dataset, including MobileNet and RegNet. Extensive experiments demonstrate the effectiveness and efficiency of our algorithm. For example, our advanced pipeline can increase up to 69% top-1 accuracy when converting MobileNet on ImageNet compared to baselines. 
Codes are released at https://github.com/yhhhli/SNN_Calibration.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21d/li21d.pdf", "supp": "", "pdf_size": 3101908, "gs_citation": 235, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15407151931731425738&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "University of Electronic Science and Technology of China+Yale University; University of Electronic Science and Technology of China; Harvard University; SenseTime Research; University of Electronic Science and Technology of China", "aff_domain": "yale.edu;uestc.edu.cn; ; ;uestc.edu.cn", "email": "yale.edu;uestc.edu.cn; ; ;uestc.edu.cn", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/li21d.html", "aff_unique_index": "0+1;0;2;3;0", "aff_unique_norm": "University of Electronic Science and Technology of China;Yale University;Harvard University;SenseTime", "aff_unique_dep": ";;;SenseTime Research", "aff_unique_url": "https://www.uestc.edu.cn;https://www.yale.edu;https://www.harvard.edu;https://www.sensetime.com", "aff_unique_abbr": "UESTC;Yale;Harvard;SenseTime", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+1;0;1;0;0", "aff_country_unique": "China;United States" }, { "title": "A Functional Perspective on Learning Symmetric Functions with Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10631", "id": "10631", "proceeding": "http://proceedings.mlr.press/v139/zweig21a.html", "slides": "/media/icml-2021/Slides/10631.pdf", "author_site": "Aaron Zweig, Joan Bruna", "author": "Aaron Zweig; Joan Bruna", "abstract": "Symmetric functions, which take as input an unordered, fixed-size set, are known to be universally representable by neural networks that enforce permutation invariance. These architectures only give guarantees for fixed input sizes, yet in many practical applications, including point clouds and particle physics, a relevant notion of generalization should include varying the input size. In this work we treat symmetric functions (of any size) as functions over probability measures, and study the learning and representation of neural networks defined on measures. By focusing on shallow architectures, we establish approximation and generalization bounds under different choices of regularization (such as RKHS and variation norms), that capture a hierarchy of functional spaces with increasing degree of non-linear learning. The resulting models can be learned efficiently and enjoy generalization guarantees that extend across input sizes, as we verify empirically.", "bibtex": "@InProceedings{pmlr-v139-zweig21a,\n title = \t {A Functional Perspective on Learning Symmetric Functions with Neural Networks},\n author = {Zweig, Aaron and Bruna, Joan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {13023--13032},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zweig21a/zweig21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zweig21a.html},\n abstract = \t {Symmetric functions, which take as input an unordered, fixed-size set, are known to be universally representable by neural networks that enforce permutation invariance. 
These architectures only give guarantees for fixed input sizes, yet in many practical applications, including point clouds and particle physics, a relevant notion of generalization should include varying the input size. In this work we treat symmetric functions (of any size) as functions over probability measures, and study the learning and representation of neural networks defined on measures. By focusing on shallow architectures, we establish approximation and generalization bounds under different choices of regularization (such as RKHS and variation norms), that capture a hierarchy of functional spaces with increasing degree of non-linear learning. The resulting models can be learned efficiently and enjoy generalization guarantees that extend across input sizes, as we verify empirically.}\n}", "pdf": "http://proceedings.mlr.press/v139/zweig21a/zweig21a.pdf", "supp": "", "pdf_size": 560354, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15843851846384894962&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Courant Institute of Mathematical Sciences, New York University, New York+Center for Data Science, New York University, New York; Courant Institute of Mathematical Sciences, New York University, New York+Center for Data Science, New York University, New York", "aff_domain": "nyu.edu; ", "email": "nyu.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/zweig21a.html", "aff_unique_index": "0+0;0+0", "aff_unique_norm": "New York University", "aff_unique_dep": "Courant Institute of Mathematical Sciences", "aff_unique_url": "https://www.nyu.edu", "aff_unique_abbr": "NYU", "aff_campus_unique_index": "0+0;0+0", "aff_campus_unique": "New York", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "United States" }, { "title": "A General Framework For Detecting Anomalous Inputs to DNN Classifiers", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10151", "id": "10151", "proceeding": "http://proceedings.mlr.press/v139/raghuram21a.html", "slides": "/media/icml-2021/Slides/10151.pdf", "author_site": "Jayaram Raghuram, Varun Chandrasekaran, Somesh Jha, Suman Banerjee", "author": "Jayaram Raghuram; Varun Chandrasekaran; Somesh Jha; Suman Banerjee", "abstract": "Detecting anomalous inputs, such as adversarial and out-of-distribution (OOD) inputs, is critical for classifiers (including deep neural networks or DNNs) deployed in real-world applications. While prior works have proposed various methods to detect such anomalous samples using information from the internal layer representations of a DNN, there is a lack of consensus on a principled approach for the different components of such a detection method. As a result, often heuristic and one-off methods are applied for different aspects of this problem. We propose an unsupervised anomaly detection framework based on the internal DNN layer representations in the form of a meta-algorithm with configurable components. We proceed to propose specific instantiations for each component of the meta-algorithm based on ideas grounded in statistical testing and anomaly detection. We evaluate the proposed methods on well-known image classification datasets with strong adversarial attacks and OOD inputs, including an adaptive attack that uses the internal layer representations of the DNN (often not considered in prior work). 
Comparisons with five recently-proposed competing detection methods demonstrates the effectiveness of our method in detecting adversarial and OOD inputs.", "bibtex": "@InProceedings{pmlr-v139-raghuram21a,\n title = \t {A General Framework For Detecting Anomalous Inputs to DNN Classifiers},\n author = {Raghuram, Jayaram and Chandrasekaran, Varun and Jha, Somesh and Banerjee, Suman},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8764--8775},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/raghuram21a/raghuram21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/raghuram21a.html},\n abstract = \t {Detecting anomalous inputs, such as adversarial and out-of-distribution (OOD) inputs, is critical for classifiers (including deep neural networks or DNNs) deployed in real-world applications. While prior works have proposed various methods to detect such anomalous samples using information from the internal layer representations of a DNN, there is a lack of consensus on a principled approach for the different components of such a detection method. As a result, often heuristic and one-off methods are applied for different aspects of this problem. We propose an unsupervised anomaly detection framework based on the internal DNN layer representations in the form of a meta-algorithm with configurable components. We proceed to propose specific instantiations for each component of the meta-algorithm based on ideas grounded in statistical testing and anomaly detection. We evaluate the proposed methods on well-known image classification datasets with strong adversarial attacks and OOD inputs, including an adaptive attack that uses the internal layer representations of the DNN (often not considered in prior work). 
Comparisons with five recently-proposed competing detection methods demonstrates the effectiveness of our method in detecting adversarial and OOD inputs.}\n}", "pdf": "http://proceedings.mlr.press/v139/raghuram21a/raghuram21a.pdf", "supp": "", "pdf_size": 1537344, "gs_citation": 42, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7846344670241873650&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Computer Sciences, University of Wisconsin, Madison, USA; Computer Sciences, University of Wisconsin, Madison, USA; Computer Sciences, University of Wisconsin, Madison, USA + XaiPient Inc., Princeton, NJ, USA; Computer Sciences, University of Wisconsin, Madison, USA", "aff_domain": "cs.wisc.edu; ; ; ", "email": "cs.wisc.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/raghuram21a.html", "aff_unique_index": "0;0;0+1;0", "aff_unique_norm": "University of Wisconsin-Madison;XaiPient Inc.", "aff_unique_dep": "Computer Sciences;", "aff_unique_url": "https://www.wisc.edu;", "aff_unique_abbr": "UW-Madison;", "aff_campus_unique_index": "0;0;0+1;0", "aff_campus_unique": "Madison;Princeton", "aff_country_unique_index": "0;0;0+0;0", "aff_country_unique": "United States" }, { "title": "A Gradient Based Strategy for Hamiltonian Monte Carlo Hyperparameter Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10755", "id": "10755", "proceeding": "http://proceedings.mlr.press/v139/campbell21a.html", "slides": "", "author_site": "Andrew Campbell, Wenlong Chen, Vincent Stimper, Jose Miguel Hernandez-Lobato, Yichuan Zhang", "author": "Andrew Campbell; Wenlong Chen; Vincent Stimper; Jose Miguel Hernandez-Lobato; Yichuan Zhang", "abstract": "Hamiltonian Monte Carlo (HMC) is one of the most successful sampling methods in machine learning. However, its performance is significantly affected by the choice of hyperparameter values. Existing approaches for optimizing the HMC hyperparameters either optimize a proxy for mixing speed or consider the HMC chain as an implicit variational distribution and optimize a tractable lower bound that can be very loose in practice. Instead, we propose to optimize an objective that quantifies directly the speed of convergence to the target distribution. Our objective can be easily optimized using stochastic gradient descent. We evaluate our proposed method and compare to baselines on a variety of problems including sampling from synthetic 2D distributions, reconstructing sparse signals, learning deep latent variable models and sampling molecular configurations from the Boltzmann distribution of a 22 atom molecule. 
We find that our method is competitive with or improves upon alternative baselines in all these experiments.", "bibtex": "@InProceedings{pmlr-v139-campbell21a,\n title = \t {A Gradient Based Strategy for Hamiltonian Monte Carlo Hyperparameter Optimization},\n author = {Campbell, Andrew and Chen, Wenlong and Stimper, Vincent and Hernandez-Lobato, Jose Miguel and Zhang, Yichuan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1238--1248},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/campbell21a/campbell21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/campbell21a.html},\n abstract = \t {Hamiltonian Monte Carlo (HMC) is one of the most successful sampling methods in machine learning. However, its performance is significantly affected by the choice of hyperparameter values. Existing approaches for optimizing the HMC hyperparameters either optimize a proxy for mixing speed or consider the HMC chain as an implicit variational distribution and optimize a tractable lower bound that can be very loose in practice. Instead, we propose to optimize an objective that quantifies directly the speed of convergence to the target distribution. Our objective can be easily optimized using stochastic gradient descent. We evaluate our proposed method and compare to baselines on a variety of problems including sampling from synthetic 2D distributions, reconstructing sparse signals, learning deep latent variable models and sampling molecular configurations from the Boltzmann distribution of a 22 atom molecule. We find that our method is competitive with or improves upon alternative baselines in all these experiments.}\n}", "pdf": "http://proceedings.mlr.press/v139/campbell21a/campbell21a.pdf", "supp": "", "pdf_size": 3161417, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5348502742696113925&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": ";;;;", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/campbell21a.html" }, { "title": "A Hybrid Variance-Reduced Method for Decentralized Stochastic Non-Convex Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8935", "id": "8935", "proceeding": "http://proceedings.mlr.press/v139/xin21a.html", "slides": "", "author_site": "Ran Xin, Usman Khan, Soummya Kar", "author": "Ran Xin; Usman Khan; Soummya Kar", "abstract": "This paper considers decentralized stochastic optimization over a network of $n$ nodes, where each node possesses a smooth non-convex local cost function and the goal of the networked nodes is to find an $\\epsilon$-accurate first-order stationary point of the sum of the local costs. We focus on an online setting, where each node accesses its local cost only by means of a stochastic first-order oracle that returns a noisy version of the exact gradient. In this context, we propose a novel single-loop decentralized hybrid variance-reduced stochastic gradient method, called GT-HSGD, that outperforms the existing approaches in terms of both the oracle complexity and practical implementation. The GT-HSGD algorithm implements specialized local hybrid stochastic gradient estimators that are fused over the network to track the global gradient. 
Remarkably, GT-HSGD achieves a network topology-independent oracle complexity of $O(n^{-1}\\epsilon^{-3})$ when the required error tolerance $\\epsilon$ is small enough, leading to a linear speedup with respect to the centralized optimal online variance-reduced approaches that operate on a single node. Numerical experiments are provided to illustrate our main technical results.", "bibtex": "@InProceedings{pmlr-v139-xin21a,\n title = \t {A Hybrid Variance-Reduced Method for Decentralized Stochastic Non-Convex Optimization},\n author = {Xin, Ran and Khan, Usman and Kar, Soummya},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11459--11469},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xin21a/xin21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/xin21a.html},\n abstract = \t {This paper considers decentralized stochastic optimization over a network of $n$ nodes, where each node possesses a smooth non-convex local cost function and the goal of the networked nodes is to find an $\\epsilon$-accurate first-order stationary point of the sum of the local costs. We focus on an online setting, where each node accesses its local cost only by means of a stochastic first-order oracle that returns a noisy version of the exact gradient. In this context, we propose a novel single-loop decentralized hybrid variance-reduced stochastic gradient method, called GT-HSGD, that outperforms the existing approaches in terms of both the oracle complexity and practical implementation. The GT-HSGD algorithm implements specialized local hybrid stochastic gradient estimators that are fused over the network to track the global gradient. Remarkably, GT-HSGD achieves a network topology-independent oracle complexity of $O(n^{-1}\\epsilon^{-3})$ when the required error tolerance $\\epsilon$ is small enough, leading to a linear speedup with respect to the centralized optimal online variance-reduced approaches that operate on a single node. 
Numerical experiments are provided to illustrate our main technical results.}\n}", "pdf": "http://proceedings.mlr.press/v139/xin21a/xin21a.pdf", "supp": "", "pdf_size": 4718016, "gs_citation": 46, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8875150059321810042&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Electrical and Computer Engineering, Carnegie Mellon University, Pittsburgh, PA, USA+1; Department of Electrical and Computer Engineering, Tufts University, Medford, MA, USA+2; Department of Electrical and Computer Engineering, Carnegie Mellon University, Pittsburgh, PA, USA+1", "aff_domain": "andrew.cmu.edu; ; ", "email": "andrew.cmu.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/xin21a.html", "aff_unique_index": "0;2;0", "aff_unique_norm": "Carnegie Mellon University;;Tufts University", "aff_unique_dep": "Department of Electrical and Computer Engineering;;Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.cmu.edu;;https://www.tufts.edu", "aff_unique_abbr": "CMU;;Tufts", "aff_campus_unique_index": "0;2;0", "aff_campus_unique": "Pittsburgh;;Medford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States;" }, { "title": "A Language for Counterfactual Generative Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9997", "id": "9997", "proceeding": "http://proceedings.mlr.press/v139/tavares21a.html", "slides": "", "author_site": "Zenna Tavares, James Koppel, Xin Zhang, Ria Das, Armando Solar-Lezama", "author": "Zenna Tavares; James Koppel; Xin Zhang; Ria Das; Armando Solar-Lezama", "abstract": "We present Omega, a probabilistic programming language with support for counterfactual inference. Counterfactual inference means to observe some fact in the present, and infer what would have happened had some past intervention been taken, e.g. \u201cgiven that medication was not effective at dose x, what is the probability that it would have been effective at dose 2x?.\u201d We accomplish this by introducing a new operator to probabilistic programming akin to Pearl\u2019s do, define its formal semantics, provide an implementation, and demonstrate its utility through examples in a variety of simulation models.", "bibtex": "@InProceedings{pmlr-v139-tavares21a,\n title = \t {A Language for Counterfactual Generative Models},\n author = {Tavares, Zenna and Koppel, James and Zhang, Xin and Das, Ria and Solar-Lezama, Armando},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10173--10182},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tavares21a/tavares21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/tavares21a.html},\n abstract = \t {We present Omega, a probabilistic programming language with support for counterfactual inference. Counterfactual inference means to observe some fact in the present, and infer what would have happened had some past intervention been taken, e.g. 
\u201cgiven that medication was not effective at dose x, what is the probability that it would have been effective at dose 2x?.\u201d We accomplish this by introducing a new operator to probabilistic programming akin to Pearl\u2019s do, define its formal semantics, provide an implementation, and demonstrate its utility through examples in a variety of simulation models.}\n}", "pdf": "http://proceedings.mlr.press/v139/tavares21a/tavares21a.pdf", "supp": "", "pdf_size": 2590541, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2067748786482591497&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "CSAIL, MIT, USA; CSAIL, MIT, USA; Key Lab of High Confidence Software Technologies, Ministry of Education, Department of Computer Science and Technology, Peking University, China; CSAIL, MIT, USA; CSAIL, MIT, USA", "aff_domain": "csail.mit.edu; ;pku.edu.cn; ; ", "email": "csail.mit.edu; ;pku.edu.cn; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/tavares21a.html", "aff_unique_index": "0;0;1;0;0", "aff_unique_norm": "Massachusetts Institute of Technology;Peking University", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory;Department of Computer Science and Technology", "aff_unique_url": "https://www.csail.mit.edu;http://www.pku.edu.cn", "aff_unique_abbr": "MIT;Peking University", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Cambridge;", "aff_country_unique_index": "0;0;1;0;0", "aff_country_unique": "United States;China" }, { "title": "A Lower Bound for the Sample Complexity of Inverse Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9833", "id": "9833", "proceeding": "http://proceedings.mlr.press/v139/komanduru21a.html", "slides": "", "author_site": "Abi Komanduru, Jean Honorio", "author": "Abi Komanduru; Jean Honorio", "abstract": "Inverse reinforcement learning (IRL) is the task of finding a reward function that generates a desired optimal policy for a given Markov Decision Process (MDP). This paper develops an information-theoretic lower bound for the sample complexity of the finite state, finite action IRL problem. A geometric construction of $\\beta$-strict separable IRL problems using spherical codes is considered. Properties of the ensemble size as well as the Kullback-Leibler divergence between the generated trajectories are derived. The resulting ensemble is then used along with Fano\u2019s inequality to derive a sample complexity lower bound of $O(n \\log n)$, where $n$ is the number of states in the MDP.", "bibtex": "@InProceedings{pmlr-v139-komanduru21a,\n title = \t {A Lower Bound for the Sample Complexity of Inverse Reinforcement Learning},\n author = {Komanduru, Abi and Honorio, Jean},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5676--5685},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/komanduru21a/komanduru21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/komanduru21a.html},\n abstract = \t {Inverse reinforcement learning (IRL) is the task of finding a reward function that generates a desired optimal policy for a given Markov Decision Process (MDP). 
This paper develops an information-theoretic lower bound for the sample complexity of the finite state, finite action IRL problem. A geometric construction of $\\beta$-strict separable IRL problems using spherical codes is considered. Properties of the ensemble size as well as the Kullback-Leibler divergence between the generated trajectories are derived. The resulting ensemble is then used along with Fano\u2019s inequality to derive a sample complexity lower bound of $O(n \\log n)$, where $n$ is the number of states in the MDP.}\n}", "pdf": "http://proceedings.mlr.press/v139/komanduru21a/komanduru21a.pdf", "supp": "", "pdf_size": 281488, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9299735328813438375&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Purdue University, Indiana, USA; Purdue University, Indiana, USA", "aff_domain": "purdue.edu; ", "email": "purdue.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/komanduru21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Purdue University", "aff_unique_dep": "", "aff_unique_url": "https://www.purdue.edu", "aff_unique_abbr": "Purdue", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Indiana", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "A Modular Analysis of Provable Acceleration via Polyak\u2019s Momentum: Training a Wide ReLU Network and a Deep Linear Network", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10149", "id": "10149", "proceeding": "http://proceedings.mlr.press/v139/wang21n.html", "slides": "", "author_site": "Jun-Kun Wang, Chi-Heng Lin, Jacob Abernethy", "author": "Jun-Kun Wang; Chi-Heng Lin; Jacob D Abernethy", "abstract": "Incorporating a so-called \u201cmomentum\u201d dynamic in gradient descent methods is widely used in neural net training as it has been broadly observed that, at least empirically, it often leads to significantly faster convergence. At the same time, there are very few theoretical guarantees in the literature to explain this apparent acceleration effect. Even for the classical strongly convex quadratic problems, several existing results only show Polyak\u2019s momentum has an accelerated linear rate asymptotically. In this paper, we first revisit the quadratic problems and show a non-asymptotic accelerated linear rate of Polyak\u2019s momentum. Then, we provably show that Polyak\u2019s momentum achieves acceleration for training a one-layer wide ReLU network and a deep linear network, which are perhaps the two most popular canonical models for studying optimization and deep learning in the literature. Prior works (Du et al. 2019) and (Wu et al. 2019) showed that using vanilla gradient descent, and with an use of over-parameterization, the error decays as $(1- \\Theta(\\frac{1}{ \\kappa\u2019}))^t$ after $t$ iterations, where $\\kappa\u2019$ is the condition number of a Gram Matrix. Our result shows that with the appropriate choice of parameters Polyak\u2019s momentum has a rate of $(1-\\Theta(\\frac{1}{\\sqrt{\\kappa\u2019}}))^t$. For the deep linear network, prior work (Hu et al. 2020) showed that vanilla gradient descent has a rate of $(1-\\Theta(\\frac{1}{\\kappa}))^t$, where $\\kappa$ is the condition number of a data matrix. Our result shows an acceleration rate $(1- \\Theta(\\frac{1}{\\sqrt{\\kappa}}))^t$ is achievable by Polyak\u2019s momentum. 
This work establishes that momentum does indeed speed up neural net training.", "bibtex": "@InProceedings{pmlr-v139-wang21n,\n title = \t {A Modular Analysis of Provable Acceleration via Polyak\u2019s Momentum: Training a Wide ReLU Network and a Deep Linear Network},\n author = {Wang, Jun-Kun and Lin, Chi-Heng and Abernethy, Jacob D},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10816--10827},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21n/wang21n.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21n.html},\n abstract = \t {Incorporating a so-called \u201cmomentum\u201d dynamic in gradient descent methods is widely used in neural net training as it has been broadly observed that, at least empirically, it often leads to significantly faster convergence. At the same time, there are very few theoretical guarantees in the literature to explain this apparent acceleration effect. Even for the classical strongly convex quadratic problems, several existing results only show Polyak\u2019s momentum has an accelerated linear rate asymptotically. In this paper, we first revisit the quadratic problems and show a non-asymptotic accelerated linear rate of Polyak\u2019s momentum. Then, we provably show that Polyak\u2019s momentum achieves acceleration for training a one-layer wide ReLU network and a deep linear network, which are perhaps the two most popular canonical models for studying optimization and deep learning in the literature. Prior works (Du et al. 2019) and (Wu et al. 2019) showed that using vanilla gradient descent, and with an use of over-parameterization, the error decays as $(1- \\Theta(\\frac{1}{ \\kappa\u2019}))^t$ after $t$ iterations, where $\\kappa\u2019$ is the condition number of a Gram Matrix. Our result shows that with the appropriate choice of parameters Polyak\u2019s momentum has a rate of $(1-\\Theta(\\frac{1}{\\sqrt{\\kappa\u2019}}))^t$. For the deep linear network, prior work (Hu et al. 2020) showed that vanilla gradient descent has a rate of $(1-\\Theta(\\frac{1}{\\kappa}))^t$, where $\\kappa$ is the condition number of a data matrix. Our result shows an acceleration rate $(1- \\Theta(\\frac{1}{\\sqrt{\\kappa}}))^t$ is achievable by Polyak\u2019s momentum. 
This work establishes that momentum does indeed speed up neural net training.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21n/wang21n.pdf", "supp": "", "pdf_size": 280332, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=82918287465875898&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "School of Computer Science, Georgia Institute of Technology; School of Electrical and Computer Engineering, Georgia Institute of Technology; School of Computer Science, Georgia Institute of Technology", "aff_domain": "gatech.edu;gatech.edu;gatech.edu", "email": "gatech.edu;gatech.edu;gatech.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/wang21n.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Georgia Institute of Technology", "aff_unique_dep": "School of Computer Science", "aff_unique_url": "https://www.gatech.edu", "aff_unique_abbr": "Georgia Tech", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Atlanta", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "A New Formalism, Method and Open Issues for Zero-Shot Coordination", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9983", "id": "9983", "proceeding": "http://proceedings.mlr.press/v139/treutlein21a.html", "slides": "", "author_site": "Johannes Treutlein, Michael Dennis, Caspar Oesterheld, Jakob Foerster", "author": "Johannes Treutlein; Michael Dennis; Caspar Oesterheld; Jakob Foerster", "abstract": "In many coordination problems, independently reasoning humans are able to discover mutually compatible policies. In contrast, independently trained self-play policies are often mutually incompatible. Zero-shot coordination (ZSC) has recently been proposed as a new frontier in multi-agent reinforcement learning to address this fundamental issue. Prior work approaches the ZSC problem by assuming players can agree on a shared learning algorithm but not on labels for actions and observations, and proposes other-play as an optimal solution. However, until now, this \u201clabel-free\u201d problem has only been informally defined. We formalize this setting as the label-free coordination (LFC) problem by defining the label-free coordination game. We show that other-play is not an optimal solution to the LFC problem as it fails to consistently break ties between incompatible maximizers of the other-play objective. We introduce an extension of the algorithm, other-play with tie-breaking, and prove that it is optimal in the LFC problem and an equilibrium in the LFC game. Since arbitrary tie-breaking is precisely what the ZSC setting aims to prevent, we conclude that the LFC problem does not reflect the aims of ZSC. 
To address this, we introduce an alternative informal operationalization of ZSC as a starting point for future work.", "bibtex": "@InProceedings{pmlr-v139-treutlein21a,\n title = \t {A New Formalism, Method and Open Issues for Zero-Shot Coordination},\n author = {Treutlein, Johannes and Dennis, Michael and Oesterheld, Caspar and Foerster, Jakob},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10413--10423},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/treutlein21a/treutlein21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/treutlein21a.html},\n abstract = \t {In many coordination problems, independently reasoning humans are able to discover mutually compatible policies. In contrast, independently trained self-play policies are often mutually incompatible. Zero-shot coordination (ZSC) has recently been proposed as a new frontier in multi-agent reinforcement learning to address this fundamental issue. Prior work approaches the ZSC problem by assuming players can agree on a shared learning algorithm but not on labels for actions and observations, and proposes other-play as an optimal solution. However, until now, this \u201clabel-free\u201d problem has only been informally defined. We formalize this setting as the label-free coordination (LFC) problem by defining the label-free coordination game. We show that other-play is not an optimal solution to the LFC problem as it fails to consistently break ties between incompatible maximizers of the other-play objective. We introduce an extension of the algorithm, other-play with tie-breaking, and prove that it is optimal in the LFC problem and an equilibrium in the LFC game. Since arbitrary tie-breaking is precisely what the ZSC setting aims to prevent, we conclude that the LFC problem does not reflect the aims of ZSC. 
To address this, we introduce an alternative informal operationalization of ZSC as a starting point for future work.}\n}", "pdf": "http://proceedings.mlr.press/v139/treutlein21a/treutlein21a.pdf", "supp": "", "pdf_size": 2799345, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7081499741440160815&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, University of Toronto, Toronto, Canada + Vector Institute, Toronto, Canada; Department of Electrical Engineering and Computer Science, University of California, Berkeley, USA; Department of Computer Science, Duke University, Durham, USA; Department of Computer Science, University of Toronto, Toronto, Canada + Vector Institute, Toronto, Canada + Facebook AI Research, USA", "aff_domain": "cs.toronto.edu; ; ; ", "email": "cs.toronto.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/treutlein21a.html", "aff_unique_index": "0+1;2;3;0+1+4", "aff_unique_norm": "University of Toronto;Vector Institute;University of California, Berkeley;Duke University;Meta", "aff_unique_dep": "Department of Computer Science;;Department of Electrical Engineering and Computer Science;Department of Computer Science;Facebook AI Research", "aff_unique_url": "https://www.utoronto.ca;https://vectorinstitute.ai;https://www.berkeley.edu;https://www.duke.edu;https://research.facebook.com", "aff_unique_abbr": "U of T;Vector Institute;UC Berkeley;Duke;FAIR", "aff_campus_unique_index": "0+0;1;2;0+0", "aff_campus_unique": "Toronto;Berkeley;Durham;", "aff_country_unique_index": "0+0;1;1;0+0+1", "aff_country_unique": "Canada;United States" }, { "title": "A New Representation of Successor Features for Transfer across Dissimilar Environments", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10753", "id": "10753", "proceeding": "http://proceedings.mlr.press/v139/abdolshah21a.html", "slides": "", "author_site": "Majid Abdolshah, Hung Le, Thommen Karimpanal George, Sunil Gupta, Santu Rana, Svetha Venkatesh", "author": "Majid Abdolshah; Hung Le; Thommen Karimpanal George; Sunil Gupta; Santu Rana; Svetha Venkatesh", "abstract": "Transfer in reinforcement learning is usually achieved through generalisation across tasks. Whilst many studies have investigated transferring knowledge when the reward function changes, they have assumed that the dynamics of the environments remain consistent. Many real-world RL problems require transfer among environments with different dynamics. To address this problem, we propose an approach based on successor features in which we model successor feature functions with Gaussian Processes permitting the source successor features to be treated as noisy measurements of the target successor feature function. Our theoretical analysis proves the convergence of this approach as well as the bounded error on modelling successor feature functions with Gaussian Processes in environments with both different dynamics and rewards. 
We demonstrate our method on benchmark datasets and show that it outperforms current baselines.", "bibtex": "@InProceedings{pmlr-v139-abdolshah21a,\n title = \t {A New Representation of Successor Features for Transfer across Dissimilar Environments},\n author = {Abdolshah, Majid and Le, Hung and George, Thommen Karimpanal and Gupta, Sunil and Rana, Santu and Venkatesh, Svetha},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1--9},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/abdolshah21a/abdolshah21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/abdolshah21a.html},\n abstract = \t {Transfer in reinforcement learning is usually achieved through generalisation across tasks. Whilst many studies have investigated transferring knowledge when the reward function changes, they have assumed that the dynamics of the environments remain consistent. Many real-world RL problems require transfer among environments with different dynamics. To address this problem, we propose an approach based on successor features in which we model successor feature functions with Gaussian Processes permitting the source successor features to be treated as noisy measurements of the target successor feature function. Our theoretical analysis proves the convergence of this approach as well as the bounded error on modelling successor feature functions with Gaussian Processes in environments with both different dynamics and rewards. We demonstrate our method on benchmark datasets and show that it outperforms current baselines.}\n}", "pdf": "http://proceedings.mlr.press/v139/abdolshah21a/abdolshah21a.pdf", "supp": "", "pdf_size": 1268580, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15810564999746983816&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Applied Artificial Intelligence Institute (A2I2), Deakin University, Geelong, Australia; Applied Artificial Intelligence Institute (A2I2), Deakin University, Geelong, Australia; Applied Artificial Intelligence Institute (A2I2), Deakin University, Geelong, Australia; Applied Artificial Intelligence Institute (A2I2), Deakin University, Geelong, Australia; Applied Artificial Intelligence Institute (A2I2), Deakin University, Geelong, Australia; Applied Artificial Intelligence Institute (A2I2), Deakin University, Geelong, Australia", "aff_domain": "deakin.edu.au; ; ; ; ; ", "email": "deakin.edu.au; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/abdolshah21a.html", "aff_unique_index": "0;0;0;0;0;0", "aff_unique_norm": "Deakin University", "aff_unique_dep": "Applied Artificial Intelligence Institute (A2I2)", "aff_unique_url": "https://www.deakin.edu.au", "aff_unique_abbr": "Deakin", "aff_campus_unique_index": "0;0;0;0;0;0", "aff_campus_unique": "Geelong", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "Australia" }, { "title": "A Novel Method to Solve Neural Knapsack Problems", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10715", "id": "10715", "proceeding": "http://proceedings.mlr.press/v139/li21m.html", "slides": "", "author_site": "Duanshun Li, Jing Liu, Dongeun Lee, Ali S. 
Mazloom, Giridhar Kaushik, Kookjin Lee, Noseong Park", "author": "Duanshun Li; Jing Liu; Dongeun Lee; Ali Seyedmazloom; Giridhar Kaushik; Kookjin Lee; Noseong Park", "abstract": "0-1 knapsack is of fundamental importance across many fields. In this paper, we present a game-theoretic method to solve 0-1 knapsack problems (KPs) where the number of items (products) is large and the values of items are not predetermined but decided by an external value assignment function (e.g., a neural network in our case) during the optimization process. While existing papers are interested in predicting solutions with neural networks for classical KPs whose objective functions are mostly linear functions, we are interested in solving KPs whose objective functions are neural networks. In other words, we choose a subset of items that maximize the sum of the values predicted by neural networks. Its key challenge is how to optimize the neural network-based non-linear KP objective with a budget constraint. Our solution is inspired by game-theoretic approaches in deep learning, e.g., generative adversarial networks. After formally defining our two-player game, we develop an adaptive gradient ascent method to solve it. In our experiments, our method successfully solves two neural network-based non-linear KPs and conventional linear KPs with 1 million items.", "bibtex": "@InProceedings{pmlr-v139-li21m,\n title = \t {A Novel Method to Solve Neural Knapsack Problems},\n author = {Li, Duanshun and Liu, Jing and Lee, Dongeun and Seyedmazloom, Ali and Kaushik, Giridhar and Lee, Kookjin and Park, Noseong},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6414--6424},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21m/li21m.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21m.html},\n abstract = \t {0-1 knapsack is of fundamental importance across many fields. In this paper, we present a game-theoretic method to solve 0-1 knapsack problems (KPs) where the number of items (products) is large and the values of items are not predetermined but decided by an external value assignment function (e.g., a neural network in our case) during the optimization process. While existing papers are interested in predicting solutions with neural networks for classical KPs whose objective functions are mostly linear functions, we are interested in solving KPs whose objective functions are neural networks. In other words, we choose a subset of items that maximize the sum of the values predicted by neural networks. Its key challenge is how to optimize the neural network-based non-linear KP objective with a budget constraint. Our solution is inspired by game-theoretic approaches in deep learning, e.g., generative adversarial networks. After formally defining our two-player game, we develop an adaptive gradient ascent method to solve it. 
In our experiments, our method successfully solves two neural network-based non-linear KPs and conventional linear KPs with 1 million items.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21m/li21m.pdf", "supp": "", "pdf_size": 1180650, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6565969573531807386&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "University of Alberta; Walmart Labs.; Texas AM University-Commerce; George Mason University; George Mason University; Arizona State University; Yonsei University", "aff_domain": "yonsei.ac.kr; ; ; ; ; ;yonsei.ac.kr", "email": "yonsei.ac.kr; ; ; ; ; ;yonsei.ac.kr", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/li21m.html", "aff_unique_index": "0;1;2;3;3;4;5", "aff_unique_norm": "University of Alberta;Walmart;Texas A&M University-Commerce;George Mason University;Arizona State University;Yonsei University", "aff_unique_dep": ";Walmart Labs;;;;", "aff_unique_url": "https://www.ualberta.ca;https://www.walmart.com;https://www.tamuc.edu/;https://www.gmu.edu;https://www.asu.edu;https://www.yonsei.ac.kr", "aff_unique_abbr": "UAlberta;Walmart Labs;TAMUC;GMU;ASU;Yonsei", "aff_campus_unique_index": "1", "aff_campus_unique": ";Commerce", "aff_country_unique_index": "0;1;1;1;1;1;2", "aff_country_unique": "Canada;United States;South Korea" }, { "title": "A Novel Sequential Coreset Method for Gradient Descent Algorithms", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10767", "id": "10767", "proceeding": "http://proceedings.mlr.press/v139/huang21b.html", "slides": "", "author_site": "Jiawei Huang, Ruomin Huang, wenjie liu, Nikolaos Freris, Hu Ding", "author": "Jiawei Huang; Ruomin Huang; Wenjie Liu; Nikolaos Freris; Hu Ding", "abstract": "A wide range of optimization problems arising in machine learning can be solved by gradient descent algorithms, and a central question in this area is how to efficiently compress a large-scale dataset so as to reduce the computational complexity. Coreset is a popular data compression technique that has been extensively studied before. However, most of existing coreset methods are problem-dependent and cannot be used as a general tool for a broader range of applications. A key obstacle is that they often rely on the pseudo-dimension and total sensitivity bound that can be very high or hard to obtain. In this paper, based on the \u201clocality\u201d property of gradient descent algorithms, we propose a new framework, termed \u201csequential coreset\u201d, which effectively avoids these obstacles. Moreover, our method is particularly suitable for sparse optimization whence the coreset size can be further reduced to be only poly-logarithmically dependent on the dimension. 
In practice, the experimental results suggest that our method can save a large amount of running time compared with the baseline algorithms.", "bibtex": "@InProceedings{pmlr-v139-huang21b,\n title = \t {A Novel Sequential Coreset Method for Gradient Descent Algorithms},\n author = {Huang, Jiawei and Huang, Ruomin and Liu, Wenjie and Freris, Nikolaos and Ding, Hu},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4412--4422},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/huang21b/huang21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/huang21b.html},\n abstract = \t {A wide range of optimization problems arising in machine learning can be solved by gradient descent algorithms, and a central question in this area is how to efficiently compress a large-scale dataset so as to reduce the computational complexity. Coreset is a popular data compression technique that has been extensively studied before. However, most of existing coreset methods are problem-dependent and cannot be used as a general tool for a broader range of applications. A key obstacle is that they often rely on the pseudo-dimension and total sensitivity bound that can be very high or hard to obtain. In this paper, based on the \u201clocality\u201d property of gradient descent algorithms, we propose a new framework, termed \u201csequential coreset\u201d, which effectively avoids these obstacles. Moreover, our method is particularly suitable for sparse optimization whence the coreset size can be further reduced to be only poly-logarithmically dependent on the dimension. 
In practice, the experimental results suggest that our method can save a large amount of running time compared with the baseline algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v139/huang21b/huang21b.pdf", "supp": "", "pdf_size": 525046, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16591386635432685797&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "School of Computer Science and Technology, University of Science and Technology of China, Anhui, China + School of Data Science, University of Science and Technology of China, Anhui, China; School of Data Science, University of Science and Technology of China, Anhui, China; School of Computer Science and Technology, University of Science and Technology of China, Anhui, China; School of Computer Science and Technology, University of Science and Technology of China, Anhui, China; School of Computer Science and Technology, University of Science and Technology of China, Anhui, China", "aff_domain": "ustc.edu.cn; ; ; ;ustc.edu.cn", "email": "ustc.edu.cn; ; ; ;ustc.edu.cn", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/huang21b.html", "aff_unique_index": "0+0;0;0;0;0", "aff_unique_norm": "University of Science and Technology of China", "aff_unique_dep": "School of Computer Science and Technology", "aff_unique_url": "http://www.ustc.edu.cn", "aff_unique_abbr": "USTC", "aff_campus_unique_index": "0+0;0;0;0;0", "aff_campus_unique": "Anhui", "aff_country_unique_index": "0+0;0;0;0;0", "aff_country_unique": "China" }, { "title": "A Nullspace Property for Subspace-Preserving Recovery", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10703", "id": "10703", "proceeding": "http://proceedings.mlr.press/v139/kaba21a.html", "slides": "", "author_site": "Mustafa D Kaba, Chong You, Daniel Robinson, Enrique Mallada, Rene Vidal", "author": "Mustafa D Kaba; Chong You; Daniel P Robinson; Enrique Mallada; Rene Vidal", "abstract": "Much of the theory for classical sparse recovery is based on conditions on the dictionary that are both necessary and sufficient (e.g., nullspace property) or only sufficient (e.g., incoherence and restricted isometry). In contrast, much of the theory for subspace-preserving recovery, the theoretical underpinnings for sparse subspace classification and clustering methods, is based on conditions on the subspaces and the data that are only sufficient (e.g., subspace incoherence and data inner-radius). This paper derives a necessary and sufficient condition for subspace-preserving recovery that is inspired by the classical nullspace property.Based on this novel condition, called here the subspace nullspace property, we derive equivalent characterizations that either admit a clear geometric interpretation that relates data distribution and subspace separation to the recovery success, or can be verified using a finite set of extreme points of a properly defined set. We further exploit these characterizations to derive new sufficient conditions, based on inner-radius and outer-radius measures and dual bounds, that generalize existing conditions and preserve the geometric interpretations. 
These results fill an important gap in the subspace-preserving recovery literature.", "bibtex": "@InProceedings{pmlr-v139-kaba21a,\n title = \t {A Nullspace Property for Subspace-Preserving Recovery},\n author = {Kaba, Mustafa D and You, Chong and Robinson, Daniel P and Mallada, Enrique and Vidal, Rene},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5180--5188},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kaba21a/kaba21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kaba21a.html},\n abstract = \t {Much of the theory for classical sparse recovery is based on conditions on the dictionary that are both necessary and sufficient (e.g., nullspace property) or only sufficient (e.g., incoherence and restricted isometry). In contrast, much of the theory for subspace-preserving recovery, the theoretical underpinnings for sparse subspace classification and clustering methods, is based on conditions on the subspaces and the data that are only sufficient (e.g., subspace incoherence and data inner-radius). This paper derives a necessary and sufficient condition for subspace-preserving recovery that is inspired by the classical nullspace property.Based on this novel condition, called here the subspace nullspace property, we derive equivalent characterizations that either admit a clear geometric interpretation that relates data distribution and subspace separation to the recovery success, or can be verified using a finite set of extreme points of a properly defined set. We further exploit these characterizations to derive new sufficient conditions, based on inner-radius and outer-radius measures and dual bounds, that generalize existing conditions and preserve the geometric interpretations. These results fill an important gap in the subspace-preserving recovery literature.}\n}", "pdf": "http://proceedings.mlr.press/v139/kaba21a/kaba21a.pdf", "supp": "", "pdf_size": 391340, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15416351622153555951&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "eBay Inc., San Jose, CA, USA; Dept. of Elect. Eng. & Comp. Sci., University of California at Berkeley, Berkeley, CA, USA; Dept. of Ind. & Sys. Eng., Lehigh University, Bethlehem, PA, USA; Dept. of Elect. & Comp. Eng., Johns Hopkins University, Baltimore, MD, USA; MINDS & Dept. of Biom. 
Eng., Johns Hopkins University, Baltimore, MD, USA", "aff_domain": "ebay.com; ; ; ; ", "email": "ebay.com; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/kaba21a.html", "aff_unique_index": "0;1;2;3;3", "aff_unique_norm": "eBay Inc.;University of California, Berkeley;Lehigh University;Johns Hopkins University", "aff_unique_dep": ";Department of Electrical Engineering and Computer Sciences;Department of Industrial and Systems Engineering;Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.ebayinc.com;https://www.berkeley.edu;https://www.lehigh.edu;https://www.jhu.edu", "aff_unique_abbr": "eBay;UC Berkeley;Lehigh;JHU", "aff_campus_unique_index": "0;1;2;3;3", "aff_campus_unique": "San Jose;Berkeley;Bethlehem;Baltimore", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "A Policy Gradient Algorithm for Learning to Learn in Multiagent Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10153", "id": "10153", "proceeding": "http://proceedings.mlr.press/v139/kim21g.html", "slides": "", "author_site": "Dong Ki Kim, Miao Liu, Matthew Riemer, Chuangchuang Sun, Marwa Abdulhai, Golnaz Habibi, Sebastian Lopez-Cot, Gerald Tesauro, Jonathan How", "author": "Dong Ki Kim; Miao Liu; Matthew D Riemer; Chuangchuang Sun; Marwa Abdulhai; Golnaz Habibi; Sebastian Lopez-Cot; Gerald Tesauro; Jonathan How", "abstract": "A fundamental challenge in multiagent reinforcement learning is to learn beneficial behaviors in a shared environment with other simultaneously learning agents. In particular, each agent perceives the environment as effectively non-stationary due to the changing policies of other agents. Moreover, each agent is itself constantly learning, leading to natural non-stationarity in the distribution of experiences encountered. In this paper, we propose a novel meta-multiagent policy gradient theorem that directly accounts for the non-stationary policy dynamics inherent to multiagent learning settings. This is achieved by modeling our gradient updates to consider both an agent\u2019s own non-stationary policy dynamics and the non-stationary policy dynamics of other agents in the environment. We show that our theoretically grounded approach provides a general solution to the multiagent learning problem, which inherently comprises all key aspects of previous state of the art approaches on this topic. 
We test our method on a diverse suite of multiagent benchmarks and demonstrate a more efficient ability to adapt to new agents as they learn than baseline methods across the full spectrum of mixed incentive, competitive, and cooperative domains.", "bibtex": "@InProceedings{pmlr-v139-kim21g,\n title = \t {A Policy Gradient Algorithm for Learning to Learn in Multiagent Reinforcement Learning},\n author = {Kim, Dong Ki and Liu, Miao and Riemer, Matthew D and Sun, Chuangchuang and Abdulhai, Marwa and Habibi, Golnaz and Lopez-Cot, Sebastian and Tesauro, Gerald and How, Jonathan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5541--5550},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kim21g/kim21g.pdf},\n url = \t {https://proceedings.mlr.press/v139/kim21g.html},\n abstract = \t {A fundamental challenge in multiagent reinforcement learning is to learn beneficial behaviors in a shared environment with other simultaneously learning agents. In particular, each agent perceives the environment as effectively non-stationary due to the changing policies of other agents. Moreover, each agent is itself constantly learning, leading to natural non-stationarity in the distribution of experiences encountered. In this paper, we propose a novel meta-multiagent policy gradient theorem that directly accounts for the non-stationary policy dynamics inherent to multiagent learning settings. This is achieved by modeling our gradient updates to consider both an agent\u2019s own non-stationary policy dynamics and the non-stationary policy dynamics of other agents in the environment. We show that our theoretically grounded approach provides a general solution to the multiagent learning problem, which inherently comprises all key aspects of previous state of the art approaches on this topic. 
We test our method on a diverse suite of multiagent benchmarks and demonstrate a more efficient ability to adapt to new agents as they learn than baseline methods across the full spectrum of mixed incentive, competitive, and cooperative domains.}\n}", "pdf": "http://proceedings.mlr.press/v139/kim21g/kim21g.pdf", "supp": "", "pdf_size": 1219472, "gs_citation": 85, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9520170531989775101&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "MIT-LIDS+MIT-IBM Watson AI Lab+IBM-Research; MIT-IBM Watson AI Lab+IBM-Research; MIT-IBM Watson AI Lab+IBM-Research; MIT-LIDS+MIT-IBM Watson AI Lab; MIT-LIDS+MIT-IBM Watson AI Lab; MIT-LIDS+MIT-IBM Watson AI Lab; MIT-LIDS+MIT-IBM Watson AI Lab; MIT-IBM Watson AI Lab+IBM-Research; MIT-LIDS+MIT-IBM Watson AI Lab", "aff_domain": "mit.edu;ibm.com; ; ; ; ; ; ; ", "email": "mit.edu;ibm.com; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 9, "oa": "https://proceedings.mlr.press/v139/kim21g.html", "aff_unique_index": "0+0+1;0+1;0+1;0+0;0+0;0+0;0+0;0+1;0+0", "aff_unique_norm": "Massachusetts Institute of Technology;IBM", "aff_unique_dep": "Laboratory for Information & Decision Systems;IBM Research", "aff_unique_url": "http://web.mit.edu/lids;https://www.ibm.com/research", "aff_unique_abbr": "MIT;IBM Research", "aff_campus_unique_index": ";;;;;;;;", "aff_campus_unique": "", "aff_country_unique_index": "0+0+0;0+0;0+0;0+0;0+0;0+0;0+0;0+0;0+0", "aff_country_unique": "United States" }, { "title": "A Practical Method for Constructing Equivariant Multilayer Perceptrons for Arbitrary Matrix Groups", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9727", "id": "9727", "proceeding": "http://proceedings.mlr.press/v139/finzi21a.html", "slides": "", "author_site": "Marc Finzi, Max Welling, Andrew Wilson", "author": "Marc Finzi; Max Welling; Andrew Gordon Wilson", "abstract": "Symmetries and equivariance are fundamental to the generalization of neural networks on domains such as images, graphs, and point clouds. Existing work has primarily focused on a small number of groups, such as the translation, rotation, and permutation groups. In this work we provide a completely general algorithm for solving for the equivariant layers of matrix groups. In addition to recovering solutions from other works as special cases, we construct multilayer perceptrons equivariant to multiple groups that have never been tackled before, including $\\mathrm{O}(1,3)$, $\\mathrm{O}(5)$, $\\mathrm{Sp}(n)$, and the Rubik\u2019s cube group. Our approach outperforms non-equivariant baselines, with applications to particle physics and modeling dynamical systems. 
We release our software library to enable researchers to construct equivariant layers for arbitrary matrix groups.", "bibtex": "@InProceedings{pmlr-v139-finzi21a,\n title = \t {A Practical Method for Constructing Equivariant Multilayer Perceptrons for Arbitrary Matrix Groups},\n author = {Finzi, Marc and Welling, Max and Wilson, Andrew Gordon Gordon},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3318--3328},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/finzi21a/finzi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/finzi21a.html},\n abstract = \t {Symmetries and equivariance are fundamental to the generalization of neural networks on domains such as images, graphs, and point clouds. Existing work has primarily focused on a small number of groups, such as the translation, rotation, and permutation groups. In this work we provide a completely general algorithm for solving for the equivariant layers of matrix groups. In addition to recovering solutions from other works as special cases, we construct multilayer perceptrons equivariant to multiple groups that have never been tackled before, including $\\mathrm{O}(1,3)$, $\\mathrm{O}(5)$, $\\mathrm{Sp}(n)$, and the Rubik\u2019s cube group. Our approach outperforms non-equivariant baselines, with applications to particle physics and modeling dynamical systems. We release our software library to enable researchers to construct equivariant layers for arbitrary matrix groups.}\n}", "pdf": "http://proceedings.mlr.press/v139/finzi21a/finzi21a.pdf", "supp": "", "pdf_size": 682942, "gs_citation": 217, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7699207538683831568&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "New York University; University of Amsterdam; New York University", "aff_domain": "nyu.edu; ;cims.nyu.edu", "email": "nyu.edu; ;cims.nyu.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/finzi21a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "New York University;University of Amsterdam", "aff_unique_dep": ";", "aff_unique_url": "https://www.nyu.edu;https://www.uva.nl", "aff_unique_abbr": "NYU;UvA", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United States;Netherlands" }, { "title": "A Precise Performance Analysis of Support Vector Regression", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9883", "id": "9883", "proceeding": "http://proceedings.mlr.press/v139/sifaou21a.html", "slides": "", "author_site": "Houssem Sifaou, Abla Kammoun, Mohamed-Slim Alouini", "author": "Houssem Sifaou; Abla Kammoun; Mohamed-Slim Alouini", "abstract": "In this paper, we study the hard and soft support vector regression techniques applied to a set of $n$ linear measurements of the form $y_i=\\boldsymbol{\\beta}_\\star^{T}{\\bf x}_i +n_i$ where $\\boldsymbol{\\beta}_\\star$ is an unknown vector, $\\left\\{{\\bf x}_i\\right\\}_{i=1}^n$ are the feature vectors and $\\left\\{{n}_i\\right\\}_{i=1}^n$ model the noise. 
Particularly, under some plausible assumptions on the statistical distribution of the data, we characterize the feasibility condition for the hard support vector regression in the regime of high dimensions and, when feasible, derive an asymptotic approximation for its risk. Similarly, we study the test risk for the soft support vector regression as a function of its parameters. Our results are then used to optimally tune the parameters intervening in the design of hard and soft support vector regression algorithms. Based on our analysis, we illustrate that adding more samples may be harmful to the test performance of support vector regression, while it is always beneficial when the parameters are optimally selected. Such a result reminds a similar phenomenon observed in modern learning architectures according to which optimally tuned architectures present a decreasing test performance curve with respect to the number of samples.", "bibtex": "@InProceedings{pmlr-v139-sifaou21a,\n title = \t {A Precise Performance Analysis of Support Vector Regression},\n author = {Sifaou, Houssem and Kammoun, Abla and Alouini, Mohamed-Slim},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9671--9680},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sifaou21a/sifaou21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/sifaou21a.html},\n abstract = \t {In this paper, we study the hard and soft support vector regression techniques applied to a set of $n$ linear measurements of the form $y_i=\\boldsymbol{\\beta}_\\star^{T}{\\bf x}_i +n_i$ where $\\boldsymbol{\\beta}_\\star$ is an unknown vector, $\\left\\{{\\bf x}_i\\right\\}_{i=1}^n$ are the feature vectors and $\\left\\{{n}_i\\right\\}_{i=1}^n$ model the noise. Particularly, under some plausible assumptions on the statistical distribution of the data, we characterize the feasibility condition for the hard support vector regression in the regime of high dimensions and, when feasible, derive an asymptotic approximation for its risk. Similarly, we study the test risk for the soft support vector regression as a function of its parameters. Our results are then used to optimally tune the parameters intervening in the design of hard and soft support vector regression algorithms. Based on our analysis, we illustrate that adding more samples may be harmful to the test performance of support vector regression, while it is always beneficial when the parameters are optimally selected. 
Such a result reminds a similar phenomenon observed in modern learning architectures according to which optimally tuned architectures present a decreasing test performance curve with respect to the number of samples.}\n}", "pdf": "http://proceedings.mlr.press/v139/sifaou21a/sifaou21a.pdf", "supp": "", "pdf_size": 534539, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17525415743330831792&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Computer, Electrical, and Mathematical Sciences & Engineering Division, King Abdullah University of Science and Technology (KAUST), Thuwal, Saudi Arabia; Computer, Electrical, and Mathematical Sciences & Engineering Division, King Abdullah University of Science and Technology (KAUST), Thuwal, Saudi Arabia; Computer, Electrical, and Mathematical Sciences & Engineering Division, King Abdullah University of Science and Technology (KAUST), Thuwal, Saudi Arabia", "aff_domain": "kaust.edu.sa; ; ", "email": "kaust.edu.sa; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/sifaou21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "King Abdullah University of Science and Technology", "aff_unique_dep": "Computer, Electrical, and Mathematical Sciences & Engineering Division", "aff_unique_url": "https://www.kaust.edu.sa", "aff_unique_abbr": "KAUST", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Thuwal", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Saudi Arabia" }, { "title": "A Probabilistic Approach to Neural Network Pruning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10317", "id": "10317", "proceeding": "http://proceedings.mlr.press/v139/qian21a.html", "slides": "", "author_site": "Xin Qian, Diego Klabjan", "author": "Xin Qian; Diego Klabjan", "abstract": "Neural network pruning techniques reduce the number of parameters without compromising predicting ability of a network. Many algorithms have been developed for pruning both over-parameterized fully-connected networks (FCN) and convolutional neural networks (CNN), but analytical studies of capabilities and compression ratios of such pruned sub-networks are lacking. We theoretically study the performance of two pruning techniques (random and magnitude-based) on FCN and CNN. Given a target network, we provide a universal approach to bound the gap between a pruned and the target network in a probabilistic sense, which is the first study of this nature. The results establish that there exist pruned networks with expressive power within any specified bound from the target network and with a significant compression ratio.", "bibtex": "@InProceedings{pmlr-v139-qian21a,\n title = \t {A Probabilistic Approach to Neural Network Pruning},\n author = {Qian, Xin and Klabjan, Diego},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8640--8649},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/qian21a/qian21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/qian21a.html},\n abstract = \t {Neural network pruning techniques reduce the number of parameters without compromising predicting ability of a network. 
Many algorithms have been developed for pruning both over-parameterized fully-connected networks (FCN) and convolutional neural networks (CNN), but analytical studies of capabilities and compression ratios of such pruned sub-networks are lacking. We theoretically study the performance of two pruning techniques (random and magnitude-based) on FCN and CNN. Given a target network, we provide a universal approach to bound the gap between a pruned and the target network in a probabilistic sense, which is the first study of this nature. The results establish that there exist pruned networks with expressive power within any specified bound from the target network and with a significant compression ratio.}\n}", "pdf": "http://proceedings.mlr.press/v139/qian21a/qian21a.pdf", "supp": "", "pdf_size": 406004, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10900929262585763348&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Industrial Engineering and Management Science, Northwestern University; Department of Industrial Engineering and Management Science, Northwestern University", "aff_domain": "u.northwestern.edu;northwestern.edu", "email": "u.northwestern.edu;northwestern.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/qian21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Northwestern University", "aff_unique_dep": "Department of Industrial Engineering and Management Science", "aff_unique_url": "https://www.northwestern.edu", "aff_unique_abbr": "NU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "A Proxy Variable View of Shared Confounding", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10489", "id": "10489", "proceeding": "http://proceedings.mlr.press/v139/wang21c.html", "slides": "", "author_site": "Yixin Wang, David Blei", "author": "Yixin Wang; David Blei", "abstract": "Causal inference from observational data can be biased by unobserved confounders. Confounders{\u2014}the variables that affect both the treatments and the outcome{\u2014}induce spurious non-causal correlations between the two. Without additional conditions, unobserved confounders generally make causal quantities hard to identify. In this paper, we focus on the setting where there are many treatments with shared confounding, and we study under what conditions is causal identification possible. The key observation is that we can view subsets of treatments as proxies of the unobserved confounder and identify the intervention distributions of the rest. Moreover, while existing identification formulas for proxy variables involve solving integral equations, we show that one can circumvent the need for such solutions by directly modeling the data. 
Finally, we extend these results to an expanded class of causal graphs, those with other confounders and selection variables.", "bibtex": "@InProceedings{pmlr-v139-wang21c,\n title = \t {A Proxy Variable View of Shared Confounding},\n author = {Wang, Yixin and Blei, David},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10697--10707},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21c/wang21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21c.html},\n abstract = \t {Causal inference from observational data can be biased by unobserved confounders. Confounders{\u2014}the variables that affect both the treatments and the outcome{\u2014}induce spurious non-causal correlations between the two. Without additional conditions, unobserved confounders generally make causal quantities hard to identify. In this paper, we focus on the setting where there are many treatments with shared confounding, and we study under what conditions is causal identification possible. The key observation is that we can view subsets of treatments as proxies of the unobserved confounder and identify the intervention distributions of the rest. Moreover, while existing identification formulas for proxy variables involve solving integral equations, we show that one can circumvent the need for such solutions by directly modeling the data. Finally, we extend these results to an expanded class of causal graphs, those with other confounders and selection variables.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21c/wang21c.pdf", "supp": "", "pdf_size": 728725, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16935221196973164329&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/wang21c.html" }, { "title": "A Receptor Skeleton for Capsule Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8915", "id": "8915", "proceeding": "http://proceedings.mlr.press/v139/chen21x.html", "slides": "", "author_site": "Jintai Chen, Hongyun Yu, Chengde Qian, Danny Z Chen, Jian Wu", "author": "Jintai Chen; Hongyun Yu; Chengde Qian; Danny Z Chen; Jian Wu", "abstract": "In previous Capsule Neural Networks (CapsNets), routing algorithms often performed clustering processes to assemble the child capsules\u2019 representations into parent capsules. Such routing algorithms were typically implemented with iterative processes and incurred high computing complexity. This paper presents a new capsule structure, which contains a set of optimizable receptors and a transmitter is devised on the capsule\u2019s representation. Specifically, child capsules\u2019 representations are sent to the parent capsules whose receptors match well the transmitters of the child capsules\u2019 representations, avoiding applying computationally complex routing algorithms. To ensure the receptors in a CapsNet work cooperatively, we build a skeleton to organize the receptors in different capsule layers in a CapsNet. The receptor skeleton assigns a share-out objective for each receptor, making the CapsNet perform as a hierarchical agglomerative clustering process. 
Comprehensive experiments verify that our approach facilitates efficient clustering processes, and CapsNets with our approach significantly outperform CapsNets with previous routing algorithms on image classification, affine transformation generalization, overlapped object recognition, and representation semantic decoupling.", "bibtex": "@InProceedings{pmlr-v139-chen21x,\n title = \t {A Receptor Skeleton for Capsule Neural Networks},\n author = {Chen, Jintai and Yu, Hongyun and Qian, Chengde and Chen, Danny Z and Wu, Jian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1781--1790},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21x/chen21x.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21x.html},\n abstract = \t {In previous Capsule Neural Networks (CapsNets), routing algorithms often performed clustering processes to assemble the child capsules\u2019 representations into parent capsules. Such routing algorithms were typically implemented with iterative processes and incurred high computing complexity. This paper presents a new capsule structure, which contains a set of optimizable receptors and a transmitter is devised on the capsule\u2019s representation. Specifically, child capsules\u2019 representations are sent to the parent capsules whose receptors match well the transmitters of the child capsules\u2019 representations, avoiding applying computationally complex routing algorithms. To ensure the receptors in a CapsNet work cooperatively, we build a skeleton to organize the receptors in different capsule layers in a CapsNet. The receptor skeleton assigns a share-out objective for each receptor, making the CapsNet perform as a hierarchical agglomerative clustering process. Comprehensive experiments verify that our approach facilitates efficient clustering processes, and CapsNets with our approach significantly outperform CapsNets with previous routing algorithms on image classification, affine transformation generalization, overlapped object recognition, and representation semantic decoupling.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21x/chen21x.pdf", "supp": "", "pdf_size": 1904406, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2101743799844762290&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": ";;;;", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/chen21x.html" }, { "title": "A Regret Minimization Approach to Iterative Learning Control", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9817", "id": "9817", "proceeding": "http://proceedings.mlr.press/v139/agarwal21b.html", "slides": "/media/icml-2021/Slides/9817.pdf", "author_site": "Naman Agarwal, Elad Hazan, Anirudha Majumdar, Karan Singh", "author": "Naman Agarwal; Elad Hazan; Anirudha Majumdar; Karan Singh", "abstract": "We consider the setting of iterative learning control, or model-based policy learning in the presence of uncertain, time-varying dynamics. In this setting, we propose a new performance metric, planning regret, which replaces the standard stochastic uncertainty assumptions with worst case regret. 
Based on recent advances in non-stochastic control, we design a new iterative algorithm for minimizing planning regret that is more robust to model mismatch and uncertainty. We provide theoretical and empirical evidence that the proposed algorithm outperforms existing methods on several benchmarks.", "bibtex": "@InProceedings{pmlr-v139-agarwal21b,\n title = \t {A Regret Minimization Approach to Iterative Learning Control},\n author = {Agarwal, Naman and Hazan, Elad and Majumdar, Anirudha and Singh, Karan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {100--109},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/agarwal21b/agarwal21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/agarwal21b.html},\n abstract = \t {We consider the setting of iterative learning control, or model-based policy learning in the presence of uncertain, time-varying dynamics. In this setting, we propose a new performance metric, planning regret, which replaces the standard stochastic uncertainty assumptions with worst case regret. Based on recent advances in non-stochastic control, we design a new iterative algorithm for minimizing planning regret that is more robust to model mismatch and uncertainty. We provide theoretical and empirical evidence that the proposed algorithm outperforms existing methods on several benchmarks.}\n}", "pdf": "http://proceedings.mlr.press/v139/agarwal21b/agarwal21b.pdf", "supp": "", "pdf_size": 505309, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9938905700564054779&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Google AI Princeton, Princeton, NJ, USA+Princeton University, Princeton, NJ, USA; Google AI Princeton, Princeton, NJ, USA+Princeton University, Princeton, NJ, USA; Google AI Princeton, Princeton, NJ, USA+Princeton University, Princeton, NJ, USA; Microsoft Research, Redmond, WA, USA", "aff_domain": "google.com;cs.princeton.edu;princeton.edu;microsoft.com", "email": "google.com;cs.princeton.edu;princeton.edu;microsoft.com", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/agarwal21b.html", "aff_unique_index": "0+1;0+1;0+1;2", "aff_unique_norm": "Google;Princeton University;Microsoft", "aff_unique_dep": "Google AI;;Microsoft Research", "aff_unique_url": "https://ai.google;https://www.princeton.edu;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "Google AI;Princeton;MSR", "aff_campus_unique_index": "0+0;0+0;0+0;1", "aff_campus_unique": "Princeton;Redmond", "aff_country_unique_index": "0+0;0+0;0+0;0", "aff_country_unique": "United States" }, { "title": "A Representation Learning Perspective on the Importance of Train-Validation Splitting in Meta-Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8759", "id": "8759", "proceeding": "http://proceedings.mlr.press/v139/saunshi21a.html", "slides": "", "author_site": "Nikunj Umesh Saunshi, Arushi Gupta, Wei Hu", "author": "Nikunj Saunshi; Arushi Gupta; Wei Hu", "abstract": "An effective approach in meta-learning is to utilize multiple \u201ctrain tasks\u201d to learn a good initialization for model parameters that can help solve unseen \u201ctest tasks\u201d with very few samples by fine-tuning from this initialization. 
Although successful in practice, theoretical understanding of such methods is limited. This work studies an important aspect of these methods: splitting the data from each task into train (support) and validation (query) sets during meta-training. Inspired by recent work (Raghu et al., 2020), we view such meta-learning methods through the lens of representation learning and argue that the train-validation split encourages the learned representation to be {\\em low-rank} without compromising on expressivity, as opposed to the non-splitting variant that encourages high-rank representations. Since sample efficiency benefits from low-rankness, the splitting strategy will require very few samples to solve unseen test tasks. We present theoretical results that formalize this idea for linear representation learning on a subspace meta-learning instance, and experimentally verify this practical benefit of splitting in simulations and on standard meta-learning benchmarks.", "bibtex": "@InProceedings{pmlr-v139-saunshi21a,\n title = \t {A Representation Learning Perspective on the Importance of Train-Validation Splitting in Meta-Learning},\n author = {Saunshi, Nikunj and Gupta, Arushi and Hu, Wei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9333--9343},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/saunshi21a/saunshi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/saunshi21a.html},\n abstract = \t {An effective approach in meta-learning is to utilize multiple \u201ctrain tasks\u201d to learn a good initialization for model parameters that can help solve unseen \u201ctest tasks\u201d with very few samples by fine-tuning from this initialization. Although successful in practice, theoretical understanding of such methods is limited. This work studies an important aspect of these methods: splitting the data from each task into train (support) and validation (query) sets during meta-training. Inspired by recent work (Raghu et al., 2020), we view such meta-learning methods through the lens of representation learning and argue that the train-validation split encourages the learned representation to be {\\em low-rank} without compromising on expressivity, as opposed to the non-splitting variant that encourages high-rank representations. Since sample efficiency benefits from low-rankness, the splitting strategy will require very few samples to solve unseen test tasks. 
We present theoretical results that formalize this idea for linear representation learning on a subspace meta-learning instance, and experimentally verify this practical benefit of splitting in simulations and on standard meta-learning benchmarks.}\n}", "pdf": "http://proceedings.mlr.press/v139/saunshi21a/saunshi21a.pdf", "supp": "", "pdf_size": 1337432, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15485330124938854681&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Princeton University; Princeton University; Princeton University", "aff_domain": "cs.princeton.edu; ; ", "email": "cs.princeton.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/saunshi21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Princeton University", "aff_unique_dep": "", "aff_unique_url": "https://www.princeton.edu", "aff_unique_abbr": "Princeton", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "A Riemannian Block Coordinate Descent Method for Computing the Projection Robust Wasserstein Distance", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8477", "id": "8477", "proceeding": "http://proceedings.mlr.press/v139/huang21e.html", "slides": "/media/icml-2021/Slides/8477.pdf", "author_site": "Minhui Huang, Shiqian Ma, Lifeng Lai", "author": "Minhui Huang; Shiqian Ma; Lifeng Lai", "abstract": "The Wasserstein distance has become increasingly important in machine learning and deep learning. Despite its popularity, the Wasserstein distance is hard to approximate because of the curse of dimensionality. A recently proposed approach to alleviate the curse of dimensionality is to project the sampled data from the high dimensional probability distribution onto a lower-dimensional subspace, and then compute the Wasserstein distance between the projected data. However, this approach requires to solve a max-min problem over the Stiefel manifold, which is very challenging in practice. In this paper, we propose a Riemannian block coordinate descent (RBCD) method to solve this problem, which is based on a novel reformulation of the regularized max-min problem over the Stiefel manifold. We show that the complexity of arithmetic operations for RBCD to obtain an $\\epsilon$-stationary point is $O(\\epsilon^{-3})$, which is significantly better than the complexity of existing methods. Numerical results on both synthetic and real datasets demonstrate that our method is more efficient than existing methods, especially when the number of sampled data is very large.", "bibtex": "@InProceedings{pmlr-v139-huang21e,\n title = \t {A Riemannian Block Coordinate Descent Method for Computing the Projection Robust Wasserstein Distance},\n author = {Huang, Minhui and Ma, Shiqian and Lai, Lifeng},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4446--4455},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/huang21e/huang21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/huang21e.html},\n abstract = \t {The Wasserstein distance has become increasingly important in machine learning and deep learning. 
Despite its popularity, the Wasserstein distance is hard to approximate because of the curse of dimensionality. A recently proposed approach to alleviate the curse of dimensionality is to project the sampled data from the high dimensional probability distribution onto a lower-dimensional subspace, and then compute the Wasserstein distance between the projected data. However, this approach requires to solve a max-min problem over the Stiefel manifold, which is very challenging in practice. In this paper, we propose a Riemannian block coordinate descent (RBCD) method to solve this problem, which is based on a novel reformulation of the regularized max-min problem over the Stiefel manifold. We show that the complexity of arithmetic operations for RBCD to obtain an $\\epsilon$-stationary point is $O(\\epsilon^{-3})$, which is significantly better than the complexity of existing methods. Numerical results on both synthetic and real datasets demonstrate that our method is more efficient than existing methods, especially when the number of sampled data is very large.}\n}", "pdf": "http://proceedings.mlr.press/v139/huang21e/huang21e.pdf", "supp": "", "pdf_size": 977094, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10760509061301899666&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Electrical and Computer Engineering, University of California, Davis, CA, USA; Department of Mathematics, University of California, Davis, CA, USA; Department of Electrical and Computer Engineering, University of California, Davis, CA, USA", "aff_domain": "ucdavis.edu;ucdavis.edu;ucdavis.edu", "email": "ucdavis.edu;ucdavis.edu;ucdavis.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/huang21e.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, Davis", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.ucdavis.edu", "aff_unique_abbr": "UC Davis", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Davis", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "A Sampling-Based Method for Tensor Ring Decomposition", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8447", "id": "8447", "proceeding": "http://proceedings.mlr.press/v139/malik21b.html", "slides": "/media/icml-2021/Slides/8447.pdf", "author_site": "Osman Asif Malik, Stephen Becker", "author": "Osman Asif Malik; Stephen Becker", "abstract": "We propose a sampling-based method for computing the tensor ring (TR) decomposition of a data tensor. The method uses leverage score sampled alternating least squares to fit the TR cores in an iterative fashion. By taking advantage of the special structure of TR tensors, we can efficiently estimate the leverage scores and attain a method which has complexity sublinear in the number of input tensor entries. We provide high-probability relative-error guarantees for the sampled least squares problems. We compare our proposal to existing methods in experiments on both synthetic and real data. Our method achieves substantial speedup\u2014sometimes two or three orders of magnitude\u2014over competing methods, while maintaining good accuracy. 
We also provide an example of how our method can be used for rapid feature extraction.", "bibtex": "@InProceedings{pmlr-v139-malik21b,\n title = \t {A Sampling-Based Method for Tensor Ring Decomposition},\n author = {Malik, Osman Asif and Becker, Stephen},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7400--7411},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/malik21b/malik21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/malik21b.html},\n abstract = \t {We propose a sampling-based method for computing the tensor ring (TR) decomposition of a data tensor. The method uses leverage score sampled alternating least squares to fit the TR cores in an iterative fashion. By taking advantage of the special structure of TR tensors, we can efficiently estimate the leverage scores and attain a method which has complexity sublinear in the number of input tensor entries. We provide high-probability relative-error guarantees for the sampled least squares problems. We compare our proposal to existing methods in experiments on both synthetic and real data. Our method achieves substantial speedup\u2014sometimes two or three orders of magnitude\u2014over competing methods, while maintaining good accuracy. We also provide an example of how our method can be used for rapid feature extraction.}\n}", "pdf": "http://proceedings.mlr.press/v139/malik21b/malik21b.pdf", "supp": "", "pdf_size": 625198, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9925150278480736841&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Applied Mathematics, University of Colorado Boulder, Boulder, CO, USA; Department of Applied Mathematics, University of Colorado Boulder, Boulder, CO, USA", "aff_domain": "colorado.edu; ", "email": "colorado.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/malik21b.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Colorado Boulder", "aff_unique_dep": "Department of Applied Mathematics", "aff_unique_url": "https://www.colorado.edu", "aff_unique_abbr": "CU Boulder", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Boulder", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "A Scalable Deterministic Global Optimization Algorithm for Clustering Problems", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10243", "id": "10243", "proceeding": "http://proceedings.mlr.press/v139/hua21a.html", "slides": "/media/icml-2021/Slides/10243.pdf", "author_site": "Kaixun Hua, Mingfei Shi, Yankai Cao", "author": "Kaixun Hua; Mingfei Shi; Yankai Cao", "abstract": "The minimum sum-of-squares clustering (MSSC) task, which can be treated as a Mixed Integer Second Order Cone Programming (MISOCP) problem, is rarely investigated in the literature through deterministic optimization to find its global optimal value. In this paper, we modelled the MSSC task as a two-stage optimization problem and proposed a tailed reduced-space branch and bound (BB) algorithm. We designed several approaches to construct lower and upper bounds at each node in the BB scheme, including a scenario grouping based Lagrangian decomposition approach. 
One key advantage of this reduced-space algorithm is that it only needs to perform branching on the centers of clusters to guarantee convergence, and the size of centers is independent of the number of data samples. Moreover, the lower bounds can be computed by solving small-scale sample subproblems, and upper bounds can be obtained trivially. These two properties enable our algorithm easy to be paralleled and can be scalable to the dataset with up to 200,000 samples for finding a global $\\epsilon$-optimal solution of the MSSC task. We performed numerical experiments on both synthetic and real-world datasets and compared our proposed algorithms with the off-the-shelf global optimal solvers and classical local optimal algorithms. The results reveal a strong performance and scalability of our algorithm.", "bibtex": "@InProceedings{pmlr-v139-hua21a,\n title = \t {A Scalable Deterministic Global Optimization Algorithm for Clustering Problems},\n author = {Hua, Kaixun and Shi, Mingfei and Cao, Yankai},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4391--4401},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hua21a/hua21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hua21a.html},\n abstract = \t {The minimum sum-of-squares clustering (MSSC) task, which can be treated as a Mixed Integer Second Order Cone Programming (MISOCP) problem, is rarely investigated in the literature through deterministic optimization to find its global optimal value. In this paper, we modelled the MSSC task as a two-stage optimization problem and proposed a tailed reduced-space branch and bound (BB) algorithm. We designed several approaches to construct lower and upper bounds at each node in the BB scheme, including a scenario grouping based Lagrangian decomposition approach. One key advantage of this reduced-space algorithm is that it only needs to perform branching on the centers of clusters to guarantee convergence, and the size of centers is independent of the number of data samples. Moreover, the lower bounds can be computed by solving small-scale sample subproblems, and upper bounds can be obtained trivially. These two properties enable our algorithm easy to be paralleled and can be scalable to the dataset with up to 200,000 samples for finding a global $\\epsilon$-optimal solution of the MSSC task. We performed numerical experiments on both synthetic and real-world datasets and compared our proposed algorithms with the off-the-shelf global optimal solvers and classical local optimal algorithms. 
The results reveal a strong performance and scalability of our algorithm.}\n}", "pdf": "http://proceedings.mlr.press/v139/hua21a/hua21a.pdf", "supp": "", "pdf_size": 337250, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5998889529649233109&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Chemical and Biological Engineering, University of British Columbia, Vancouver, British Columbia, Canada; Department of Chemical and Biological Engineering, University of British Columbia, Vancouver, British Columbia, Canada; Department of Chemical and Biological Engineering, University of British Columbia, Vancouver, British Columbia, Canada", "aff_domain": "ubc.ca; ; ", "email": "ubc.ca; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/hua21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of British Columbia", "aff_unique_dep": "Department of Chemical and Biological Engineering", "aff_unique_url": "https://www.ubc.ca", "aff_unique_abbr": "UBC", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Vancouver", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Canada" }, { "title": "A Scalable Second Order Method for Ill-Conditioned Matrix Completion from Few Samples", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9411", "id": "9411", "proceeding": "http://proceedings.mlr.press/v139/kummerle21a.html", "slides": "/media/icml-2021/Slides/9411.pdf", "author_site": "Christian K\u00fcmmerle, Claudio Mayrink Verdun", "author": "Christian K\u00fcmmerle; Claudio M. Verdun", "abstract": "We propose an iterative algorithm for low-rank matrix completion with that can be interpreted as an iteratively reweighted least squares (IRLS) algorithm, a saddle-escaping smoothing Newton method or a variable metric proximal gradient method applied to a non-convex rank surrogate. It combines the favorable data-efficiency of previous IRLS approaches with an improved scalability by several orders of magnitude. We establish the first local convergence guarantee from a minimal number of samples for that class of algorithms, showing that the method attains a local quadratic convergence rate. Furthermore, we show that the linear systems to be solved are well-conditioned even for very ill-conditioned ground truth matrices. 
We provide extensive experiments, indicating that unlike many state-of-the-art approaches, our method is able to complete very ill-conditioned matrices with a condition number of up to $10^{10}$ from few samples, while being competitive in its scalability.", "bibtex": "@InProceedings{pmlr-v139-kummerle21a,\n title = \t {A Scalable Second Order Method for Ill-Conditioned Matrix Completion from Few Samples},\n author = {K{\\\"u}mmerle, Christian and Verdun, Claudio M.},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5872--5883},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kummerle21a/kummerle21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kummerle21a.html},\n abstract = \t {We propose an iterative algorithm for low-rank matrix completion with that can be interpreted as an iteratively reweighted least squares (IRLS) algorithm, a saddle-escaping smoothing Newton method or a variable metric proximal gradient method applied to a non-convex rank surrogate. It combines the favorable data-efficiency of previous IRLS approaches with an improved scalability by several orders of magnitude. We establish the first local convergence guarantee from a minimal number of samples for that class of algorithms, showing that the method attains a local quadratic convergence rate. Furthermore, we show that the linear systems to be solved are well-conditioned even for very ill-conditioned ground truth matrices. We provide extensive experiments, indicating that unlike many state-of-the-art approaches, our method is able to complete very ill-conditioned matrices with a condition number of up to $10^{10}$ from few samples, while being competitive in its scalability.}\n}", "pdf": "http://proceedings.mlr.press/v139/kummerle21a/kummerle21a.pdf", "supp": "", "pdf_size": 1832914, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9201585357486239881&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Applied Mathematics & Statistics, Johns Hopkins University, Baltimore, USA; Department of Mathematics and Department of Electrical and Computer Engineering, Technical University of Munich, Munich, Germany", "aff_domain": "jhu.edu;tum.de", "email": "jhu.edu;tum.de", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/kummerle21a.html", "aff_unique_index": "0;1", "aff_unique_norm": "Johns Hopkins University;Technical University of Munich", "aff_unique_dep": "Department of Applied Mathematics & Statistics;Department of Mathematics", "aff_unique_url": "https://www.jhu.edu;https://www.tum.de", "aff_unique_abbr": "JHU;TUM", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Baltimore;Munich", "aff_country_unique_index": "0;1", "aff_country_unique": "United States;Germany" }, { "title": "A Second look at Exponential and Cosine Step Sizes: Simplicity, Adaptivity, and Performance", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9527", "id": "9527", "proceeding": "http://proceedings.mlr.press/v139/li21z.html", "slides": "", "author_site": "Xiaoyu Li, Zhenxun Zhuang, Francesco Orabona", "author": "Xiaoyu Li; Zhenxun Zhuang; Francesco Orabona", "abstract": "Stochastic Gradient Descent (SGD) is a popular tool in training large-scale machine learning 
models. Its performance, however, is highly variable, depending crucially on the choice of the step sizes. Accordingly, a variety of strategies for tuning the step sizes have been proposed, ranging from coordinate-wise approaches (a.k.a. \u201cadaptive\u201d step sizes) to sophisticated heuristics to change the step size in each iteration. In this paper, we study two step size schedules whose power has been repeatedly confirmed in practice: the exponential and the cosine step sizes. For the first time, we provide theoretical support for them proving convergence rates for smooth non-convex functions, with and without the Polyak-\u0141{}ojasiewicz (PL) condition. Moreover, we show the surprising property that these two strategies are \\emph{adaptive} to the noise level in the stochastic gradients of PL functions. That is, contrary to polynomial step sizes, they achieve almost optimal performance without needing to know the noise level nor tuning their hyperparameters based on it. Finally, we conduct a fair and comprehensive empirical evaluation of real-world datasets with deep learning architectures. Results show that, even if only requiring at most two hyperparameters to tune, these two strategies best or match the performance of various finely-tuned state-of-the-art strategies.", "bibtex": "@InProceedings{pmlr-v139-li21z,\n title = \t {A Second look at Exponential and Cosine Step Sizes: Simplicity, Adaptivity, and Performance},\n author = {Li, Xiaoyu and Zhuang, Zhenxun and Orabona, Francesco},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6553--6564},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21z/li21z.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21z.html},\n abstract = \t {Stochastic Gradient Descent (SGD) is a popular tool in training large-scale machine learning models. Its performance, however, is highly variable, depending crucially on the choice of the step sizes. Accordingly, a variety of strategies for tuning the step sizes have been proposed, ranging from coordinate-wise approaches (a.k.a. \u201cadaptive\u201d step sizes) to sophisticated heuristics to change the step size in each iteration. In this paper, we study two step size schedules whose power has been repeatedly confirmed in practice: the exponential and the cosine step sizes. For the first time, we provide theoretical support for them proving convergence rates for smooth non-convex functions, with and without the Polyak-\u0141{}ojasiewicz (PL) condition. Moreover, we show the surprising property that these two strategies are \\emph{adaptive} to the noise level in the stochastic gradients of PL functions. That is, contrary to polynomial step sizes, they achieve almost optimal performance without needing to know the noise level nor tuning their hyperparameters based on it. Finally, we conduct a fair and comprehensive empirical evaluation of real-world datasets with deep learning architectures. 
Results show that, even if only requiring at most two hyperparameters to tune, these two strategies best or match the performance of various finely-tuned state-of-the-art strategies.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21z/li21z.pdf", "supp": "", "pdf_size": 1049998, "gs_citation": 34, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4020044467123428903&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Division of System Engineering, Boston University, Boston, MA, US+Department of Computer Science, Boston University, Boston, MA, US+Department of Electrical & Computer Engineering, Boston University, Boston, MA, US; Division of System Engineering, Boston University, Boston, MA, US+Department of Computer Science, Boston University, Boston, MA, US+Department of Electrical & Computer Engineering, Boston University, Boston, MA, US; Division of System Engineering, Boston University, Boston, MA, US+Department of Computer Science, Boston University, Boston, MA, US+Department of Electrical & Computer Engineering, Boston University, Boston, MA, US", "aff_domain": "bu.edu;bu.edu; ", "email": "bu.edu;bu.edu; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/li21z.html", "aff_unique_index": "0+0+0;0+0+0;0+0+0", "aff_unique_norm": "Boston University", "aff_unique_dep": "Division of System Engineering", "aff_unique_url": "https://www.bu.edu", "aff_unique_abbr": "BU", "aff_campus_unique_index": "0+0+0;0+0+0;0+0+0", "aff_campus_unique": "Boston", "aff_country_unique_index": "0+0+0;0+0+0;0+0+0", "aff_country_unique": "United States" }, { "title": "A Sharp Analysis of Model-based Reinforcement Learning with Self-Play", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8981", "id": "8981", "proceeding": "http://proceedings.mlr.press/v139/liu21z.html", "slides": "", "author_site": "Qinghua Liu, Tiancheng Yu, Yu Bai, Chi Jin", "author": "Qinghua Liu; Tiancheng Yu; Yu Bai; Chi Jin", "abstract": "Model-based algorithms\u2014algorithms that explore the environment through building and utilizing an estimated model\u2014are widely used in reinforcement learning practice and theoretically shown to achieve optimal sample efficiency for single-agent reinforcement learning in Markov Decision Processes (MDPs). However, for multi-agent reinforcement learning in Markov games, the current best known sample complexity for model-based algorithms is rather suboptimal and compares unfavorably against recent model-free approaches. In this paper, we present a sharp analysis of model-based self-play algorithms for multi-agent Markov games. We design an algorithm \\emph{Optimistic Nash Value Iteration} (Nash-VI) for two-player zero-sum Markov games that is able to output an $\\epsilon$-approximate Nash policy in $\\tilde{\\mathcal{O}}(H^3SAB/\\epsilon^2)$ episodes of game playing, where $S$ is the number of states, $A,B$ are the number of actions for the two players respectively, and $H$ is the horizon length. This significantly improves over the best known model-based guarantee of $\\tilde{\\mathcal{O}}(H^4S^2AB/\\epsilon^2)$, and is the first that matches the information-theoretic lower bound $\\Omega(H^3S(A+B)/\\epsilon^2)$ except for a $\\min\\{A,B\\}$ factor. 
In addition, our guarantee compares favorably against the best known model-free algorithm if $\\min\\{A,B\\}=o(H^3)$, and outputs a single Markov policy while existing sample-efficient model-free algorithms output a nested mixture of Markov policies that is in general non-Markov and rather inconvenient to store and execute. We further adapt our analysis to designing a provably efficient task-agnostic algorithm for zero-sum Markov games, and designing the first line of provably sample-efficient algorithms for multi-player general-sum Markov games.", "bibtex": "@InProceedings{pmlr-v139-liu21z,\n title = \t {A Sharp Analysis of Model-based Reinforcement Learning with Self-Play},\n author = {Liu, Qinghua and Yu, Tiancheng and Bai, Yu and Jin, Chi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7001--7010},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21z/liu21z.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21z.html},\n abstract = \t {Model-based algorithms\u2014algorithms that explore the environment through building and utilizing an estimated model\u2014are widely used in reinforcement learning practice and theoretically shown to achieve optimal sample efficiency for single-agent reinforcement learning in Markov Decision Processes (MDPs). However, for multi-agent reinforcement learning in Markov games, the current best known sample complexity for model-based algorithms is rather suboptimal and compares unfavorably against recent model-free approaches. In this paper, we present a sharp analysis of model-based self-play algorithms for multi-agent Markov games. We design an algorithm \\emph{Optimistic Nash Value Iteration} (Nash-VI) for two-player zero-sum Markov games that is able to output an $\\epsilon$-approximate Nash policy in $\\tilde{\\mathcal{O}}(H^3SAB/\\epsilon^2)$ episodes of game playing, where $S$ is the number of states, $A,B$ are the number of actions for the two players respectively, and $H$ is the horizon length. This significantly improves over the best known model-based guarantee of $\\tilde{\\mathcal{O}}(H^4S^2AB/\\epsilon^2)$, and is the first that matches the information-theoretic lower bound $\\Omega(H^3S(A+B)/\\epsilon^2)$ except for a $\\min\\{A,B\\}$ factor. In addition, our guarantee compares favorably against the best known model-free algorithm if $\\min\\{A,B\\}=o(H^3)$, and outputs a single Markov policy while existing sample-efficient model-free algorithms output a nested mixture of Markov policies that is in general non-Markov and rather inconvenient to store and execute. 
We further adapt our analysis to designing a provably efficient task-agnostic algorithm for zero-sum Markov games, and designing the first line of provably sample-efficient algorithms for multi-player general-sum Markov games.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21z/liu21z.pdf", "supp": "", "pdf_size": 325485, "gs_citation": 169, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10803750602455622490&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Princeton University; Massachusetts Institute of Technology; Salesforce Research; Princeton University", "aff_domain": "princeton.edu; ; ; ", "email": "princeton.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/liu21z.html", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "Princeton University;Massachusetts Institute of Technology;Salesforce", "aff_unique_dep": ";;Salesforce Research", "aff_unique_url": "https://www.princeton.edu;https://web.mit.edu;https://research.salesforce.com", "aff_unique_abbr": "Princeton;MIT;Salesforce", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "A Structured Observation Distribution for Generative Biological Sequence Prediction and Forecasting", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9725", "id": "9725", "proceeding": "http://proceedings.mlr.press/v139/weinstein21a.html", "slides": "/media/icml-2021/Slides/9725.pdf", "author_site": "Eli N. Weinstein, Debora Marks", "author": "Eli N Weinstein; Debora Marks", "abstract": "Generative probabilistic modeling of biological sequences has widespread existing and potential application across biology and biomedicine, from evolutionary biology to epidemiology to protein design. Many standard sequence analysis methods preprocess data using a multiple sequence alignment (MSA) algorithm, one of the most widely used computational methods in all of science. However, as we show in this article, training generative probabilistic models with MSA preprocessing leads to statistical pathologies in the context of sequence prediction and forecasting. To address these problems, we propose a principled drop-in alternative to MSA preprocessing in the form of a structured observation distribution (the \"MuE\" distribution). We prove theoretically that the MuE distribution comprehensively generalizes popular methods for inferring biological sequence alignments, and provide a precise characterization of how such biological models have differed from natural language latent alignment models. 
We show empirically that models that use the MuE as an observation distribution outperform comparable methods across a variety of datasets, and apply MuE models to a novel problem for generative probabilistic sequence models: forecasting pathogen evolution.", "bibtex": "@InProceedings{pmlr-v139-weinstein21a,\n title = \t {A Structured Observation Distribution for Generative Biological Sequence Prediction and Forecasting},\n author = {Weinstein, Eli N and Marks, Debora},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11068--11079},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/weinstein21a/weinstein21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/weinstein21a.html},\n abstract = \t {Generative probabilistic modeling of biological sequences has widespread existing and potential application across biology and biomedicine, from evolutionary biology to epidemiology to protein design. Many standard sequence analysis methods preprocess data using a multiple sequence alignment (MSA) algorithm, one of the most widely used computational methods in all of science. However, as we show in this article, training generative probabilistic models with MSA preprocessing leads to statistical pathologies in the context of sequence prediction and forecasting. To address these problems, we propose a principled drop-in alternative to MSA preprocessing in the form of a structured observation distribution (the \"MuE\" distribution). We prove theoretically that the MuE distribution comprehensively generalizes popular methods for inferring biological sequence alignments, and provide a precise characterization of how such biological models have differed from natural language latent alignment models. 
We show empirically that models that use the MuE as an observation distribution outperform comparable methods across a variety of datasets, and apply MuE models to a novel problem for generative probabilistic sequence models: forecasting pathogen evolution.}\n}", "pdf": "http://proceedings.mlr.press/v139/weinstein21a/weinstein21a.pdf", "supp": "", "pdf_size": 3781605, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4418917220702982497&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Program in Biophysics, Harvard University, Cambridge, MA, USA; Department of Systems Biology, Harvard Medical School, Boston, MA, USA + Broad Institute of Harvard and MIT, Cambridge, MA, USA", "aff_domain": "g.harvard.edu;hms.harvard.edu", "email": "g.harvard.edu;hms.harvard.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/weinstein21a.html", "aff_unique_index": "0;1+2", "aff_unique_norm": "Harvard University;Harvard Medical School;Broad Institute", "aff_unique_dep": "Program in Biophysics;Department of Systems Biology;", "aff_unique_url": "https://www.harvard.edu;https://hms.harvard.edu;https://www.broadinstitute.org", "aff_unique_abbr": "Harvard;HMS;Broad", "aff_campus_unique_index": "0;1+0", "aff_campus_unique": "Cambridge;Boston", "aff_country_unique_index": "0;0+0", "aff_country_unique": "United States" }, { "title": "A Tale of Two Efficient and Informative Negative Sampling Distributions", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9735", "id": "9735", "proceeding": "http://proceedings.mlr.press/v139/daghaghi21a.html", "slides": "", "author_site": "Shabnam Daghaghi, Tharun Medini, Nicholas Meisburger, Beidi Chen, Mengnan Zhao, Anshumali Shrivastava", "author": "Shabnam Daghaghi; Tharun Medini; Nicholas Meisburger; Beidi Chen; Mengnan Zhao; Anshumali Shrivastava", "abstract": "Softmax classifiers with a very large number of classes naturally occur in many applications such as natural language processing and information retrieval. The calculation of full softmax is costly from the computational and energy perspective. There have been various sampling approaches to overcome this challenge, popularly known as negative sampling (NS). Ideally, NS should sample negative classes from a distribution that is dependent on the input data, the current parameters, and the correct positive class. Unfortunately, due to the dynamically updated parameters and data samples, there is no sampling scheme that is provably adaptive and samples the negative classes efficiently. Therefore, alternative heuristics like random sampling, static frequency-based sampling, or learning-based biased sampling, which primarily trade either the sampling cost or the adaptivity of samples per iteration are adopted. In this paper, we show two classes of distributions where the sampling scheme is truly adaptive and provably generates negative samples in near-constant time. 
Our implementation in C++ on CPU is significantly superior, both in terms of wall-clock time and accuracy, compared to the most optimized TensorFlow implementations of other popular negative sampling approaches on powerful NVIDIA V100 GPU.", "bibtex": "@InProceedings{pmlr-v139-daghaghi21a,\n title = \t {A Tale of Two Efficient and Informative Negative Sampling Distributions},\n author = {Daghaghi, Shabnam and Medini, Tharun and Meisburger, Nicholas and Chen, Beidi and Zhao, Mengnan and Shrivastava, Anshumali},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2319--2329},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/daghaghi21a/daghaghi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/daghaghi21a.html},\n abstract = \t {Softmax classifiers with a very large number of classes naturally occur in many applications such as natural language processing and information retrieval. The calculation of full softmax is costly from the computational and energy perspective. There have been various sampling approaches to overcome this challenge, popularly known as negative sampling (NS). Ideally, NS should sample negative classes from a distribution that is dependent on the input data, the current parameters, and the correct positive class. Unfortunately, due to the dynamically updated parameters and data samples, there is no sampling scheme that is provably adaptive and samples the negative classes efficiently. Therefore, alternative heuristics like random sampling, static frequency-based sampling, or learning-based biased sampling, which primarily trade either the sampling cost or the adaptivity of samples per iteration are adopted. In this paper, we show two classes of distributions where the sampling scheme is truly adaptive and provably generates negative samples in near-constant time. 
Our implementation in C++ on CPU is significantly superior, both in terms of wall-clock time and accuracy, compared to the most optimized TensorFlow implementations of other popular negative sampling approaches on powerful NVIDIA V100 GPU.}\n}", "pdf": "http://proceedings.mlr.press/v139/daghaghi21a/daghaghi21a.pdf", "supp": "", "pdf_size": 1085318, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7679751818270710644&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Electrical and Computer Engineering, Rice University; Department of Electrical and Computer Engineering, Rice University; Department of Computer Science, Rice University; Department of Computer Science, Stanford University; Department of Electrical and Computer Engineering, Rice University; Department of Computer Science, Rice University", "aff_domain": "rice.edu; ; ; ; ; ", "email": "rice.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/daghaghi21a.html", "aff_unique_index": "0;0;0;1;0;0", "aff_unique_norm": "Rice University;Stanford University", "aff_unique_dep": "Department of Electrical and Computer Engineering;Department of Computer Science", "aff_unique_url": "https://www.rice.edu;https://www.stanford.edu", "aff_unique_abbr": "Rice;Stanford", "aff_campus_unique_index": "1", "aff_campus_unique": ";Stanford", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "A Theory of Label Propagation for Subpopulation Shift", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9441", "id": "9441", "proceeding": "http://proceedings.mlr.press/v139/cai21b.html", "slides": "/media/icml-2021/Slides/9441.pdf", "author_site": "Tianle Cai, Ruiqi Gao, Jason Lee, Qi Lei", "author": "Tianle Cai; Ruiqi Gao; Jason Lee; Qi Lei", "abstract": "One of the central problems in machine learning is domain adaptation. Different from past theoretical works, we consider a new model of subpopulation shift in the input or representation space. In this work, we propose a provably effective framework based on label propagation by using an input consistency loss. In our analysis we used a simple but realistic \u201cexpansion\u201d assumption, which has been proposed in \\citet{wei2021theoretical}. It turns out that based on a teacher classifier on the source domain, the learned classifier can not only propagate to the target domain but also improve upon the teacher. By leveraging existing generalization bounds, we also obtain end-to-end finite-sample guarantees on deep neural networks. In addition, we extend our theoretical framework to a more general setting of source-to-target transfer based on an additional unlabeled dataset, which can be easily applied to various learning scenarios. 
Inspired by our theory, we adapt consistency-based semi-supervised learning methods to domain adaptation settings and gain significant improvements.", "bibtex": "@InProceedings{pmlr-v139-cai21b,\n title = \t {A Theory of Label Propagation for Subpopulation Shift},\n author = {Cai, Tianle and Gao, Ruiqi and Lee, Jason and Lei, Qi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1170--1182},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cai21b/cai21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/cai21b.html},\n abstract = \t {One of the central problems in machine learning is domain adaptation. Different from past theoretical works, we consider a new model of subpopulation shift in the input or representation space. In this work, we propose a provably effective framework based on label propagation by using an input consistency loss. In our analysis we used a simple but realistic \u201cexpansion\u201d assumption, which has been proposed in \\citet{wei2021theoretical}. It turns out that based on a teacher classifier on the source domain, the learned classifier can not only propagate to the target domain but also improve upon the teacher. By leveraging existing generalization bounds, we also obtain end-to-end finite-sample guarantees on deep neural networks. In addition, we extend our theoretical framework to a more general setting of source-to-target transfer based on an additional unlabeled dataset, which can be easily applied to various learning scenarios. Inspired by our theory, we adapt consistency-based semi-supervised learning methods to domain adaptation settings and gain significant improvements.}\n}", "pdf": "http://proceedings.mlr.press/v139/cai21b/cai21b.pdf", "supp": "", "pdf_size": 508434, "gs_citation": 62, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7357593794467720715&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Princeton University+Zhongguancun Haihua Institute for Frontier Information Technology; Princeton University+Zhongguancun Haihua Institute for Frontier Information Technology; Princeton University; Princeton University", "aff_domain": "princeton.edu; ; ; ", "email": "princeton.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/cai21b.html", "aff_unique_index": "0+1;0+1;0;0", "aff_unique_norm": "Princeton University;Zhongguancun Haihua Institute for Frontier Information Technology", "aff_unique_dep": ";Institute for Frontier Information Technology", "aff_unique_url": "https://www.princeton.edu;", "aff_unique_abbr": "Princeton;", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+1;0+1;0;0", "aff_country_unique": "United States;China" }, { "title": "A Unified Generative Adversarial Network Training via Self-Labeling and Self-Attention", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9517", "id": "9517", "proceeding": "http://proceedings.mlr.press/v139/watanabe21a.html", "slides": "", "author_site": "Tomoki Watanabe, Paolo Favaro", "author": "Tomoki Watanabe; Paolo Favaro", "abstract": "We propose a novel GAN training scheme that can handle any level of labeling in a unified manner. 
Our scheme introduces a form of artificial labeling that can incorporate manually defined labels, when available, and induce an alignment between them. To define the artificial labels, we exploit the assumption that neural network generators can be trained more easily to map nearby latent vectors to data with semantic similarities, than across separate categories. We use generated data samples and their corresponding artificial conditioning labels to train a classifier. The classifier is then used to self-label real data. To boost the accuracy of the self-labeling, we also use the exponential moving average of the classifier. However, because the classifier might still make mistakes, especially at the beginning of the training, we also refine the labels through self-attention, by using the labeling of real data samples only when the classifier outputs a high classification probability score. We evaluate our approach on CIFAR-10, STL-10 and SVHN, and show that both self-labeling and self-attention consistently improve the quality of generated data. More surprisingly, we find that the proposed scheme can even outperform class-conditional GANs.", "bibtex": "@InProceedings{pmlr-v139-watanabe21a,\n title = \t {A Unified Generative Adversarial Network Training via Self-Labeling and Self-Attention},\n author = {Watanabe, Tomoki and Favaro, Paolo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11024--11034},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/watanabe21a/watanabe21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/watanabe21a.html},\n abstract = \t {We propose a novel GAN training scheme that can handle any level of labeling in a unified manner. Our scheme introduces a form of artificial labeling that can incorporate manually defined labels, when available, and induce an alignment between them. To define the artificial labels, we exploit the assumption that neural network generators can be trained more easily to map nearby latent vectors to data with semantic similarities, than across separate categories. We use generated data samples and their corresponding artificial conditioning labels to train a classifier. The classifier is then used to self-label real data. To boost the accuracy of the self-labeling, we also use the exponential moving average of the classifier. However, because the classifier might still make mistakes, especially at the beginning of the training, we also refine the labels through self-attention, by using the labeling of real data samples only when the classifier outputs a high classification probability score. We evaluate our approach on CIFAR-10, STL-10 and SVHN, and show that both self-labeling and self-attention consistently improve the quality of generated data. 
More surprisingly, we find that the proposed scheme can even outperform class-conditional GANs.}\n}", "pdf": "http://proceedings.mlr.press/v139/watanabe21a/watanabe21a.pdf", "supp": "", "pdf_size": 8503654, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1150313063836380429&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Toshiba Corporation, Kawasaki, Japan + University of Bern, Bern, Switzerland; University of Bern, Bern, Switzerland", "aff_domain": "toshiba.co.jp;inf.unibe.ch", "email": "toshiba.co.jp;inf.unibe.ch", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/watanabe21a.html", "aff_unique_index": "0+1;1", "aff_unique_norm": "Toshiba Corporation;University of Bern", "aff_unique_dep": ";", "aff_unique_url": "https://www.toshiba.co.jp;https://www.unibe.ch", "aff_unique_abbr": "Toshiba;UniBE", "aff_campus_unique_index": "0+1;1", "aff_campus_unique": "Kawasaki;Bern", "aff_country_unique_index": "0+1;1", "aff_country_unique": "Japan;Switzerland" }, { "title": "A Unified Lottery Ticket Hypothesis for Graph Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10503", "id": "10503", "proceeding": "http://proceedings.mlr.press/v139/chen21p.html", "slides": "", "author_site": "Tianlong Chen, Yongduo Sui, Xuxi Chen, Aston Zhang, Zhangyang \u201cAtlas\u201d Wang", "author": "Tianlong Chen; Yongduo Sui; Xuxi Chen; Aston Zhang; Zhangyang Wang", "abstract": "With graphs rapidly growing in size and deeper graph neural networks (GNNs) emerging, the training and inference of GNNs become increasingly expensive. Existing network weight pruning algorithms cannot address the main space and computational bottleneck in GNNs, caused by the size and connectivity of the graph. To this end, this paper first presents a unified GNN sparsification (UGS) framework that simultaneously prunes the graph adjacency matrix and the model weights, for effectively accelerating GNN inference on large-scale graphs. Leveraging this new tool, we further generalize the recently popular lottery ticket hypothesis to GNNs for the first time, by defining a graph lottery ticket (GLT) as a pair of core sub-dataset and sparse sub-network, which can be jointly identified from the original GNN and the full dense graph by iteratively applying UGS. Like its counterpart in convolutional neural networks, GLT can be trained in isolation to match the performance of training with the full model and graph, and can be drawn from both randomly initialized and self-supervised pre-trained GNNs. Our proposal has been experimentally verified across various GNN architectures and diverse tasks, on both small-scale graph datasets (Cora, Citeseer and PubMed), and large-scale datasets from the challenging Open Graph Benchmark (OGB). Specifically, for node classification, our found GLTs achieve the same accuracies with 20%\u00a098% MACs saving on small graphs and 25%\u00a085% MACs saving on large ones. For link prediction, GLTs lead to 48%\u00a097% and 70% MACs saving on small and large graph datasets, respectively, without compromising predictive performance. 
Codes are at https://github.com/VITA-Group/Unified-LTH-GNN.", "bibtex": "@InProceedings{pmlr-v139-chen21p,\n title = \t {A Unified Lottery Ticket Hypothesis for Graph Neural Networks},\n author = {Chen, Tianlong and Sui, Yongduo and Chen, Xuxi and Zhang, Aston and Wang, Zhangyang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1695--1706},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21p/chen21p.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21p.html},\n abstract = \t {With graphs rapidly growing in size and deeper graph neural networks (GNNs) emerging, the training and inference of GNNs become increasingly expensive. Existing network weight pruning algorithms cannot address the main space and computational bottleneck in GNNs, caused by the size and connectivity of the graph. To this end, this paper first presents a unified GNN sparsification (UGS) framework that simultaneously prunes the graph adjacency matrix and the model weights, for effectively accelerating GNN inference on large-scale graphs. Leveraging this new tool, we further generalize the recently popular lottery ticket hypothesis to GNNs for the first time, by defining a graph lottery ticket (GLT) as a pair of core sub-dataset and sparse sub-network, which can be jointly identified from the original GNN and the full dense graph by iteratively applying UGS. Like its counterpart in convolutional neural networks, GLT can be trained in isolation to match the performance of training with the full model and graph, and can be drawn from both randomly initialized and self-supervised pre-trained GNNs. Our proposal has been experimentally verified across various GNN architectures and diverse tasks, on both small-scale graph datasets (Cora, Citeseer and PubMed), and large-scale datasets from the challenging Open Graph Benchmark (OGB). Specifically, for node classification, our found GLTs achieve the same accuracies with 20%\u00a098% MACs saving on small graphs and 25%\u00a085% MACs saving on large ones. For link prediction, GLTs lead to 48%\u00a097% and 70% MACs saving on small and large graph datasets, respectively, without compromising predictive performance. 
Codes are at https://github.com/VITA-Group/Unified-LTH-GNN.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21p/chen21p.pdf", "supp": "", "pdf_size": 3740816, "gs_citation": 227, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14150091349849211712&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Electrical and Computer Engineering, University of Texas at Austin+AWS Deep Learning; University of Science and Technology of China; Department of Electrical and Computer Engineering, University of Texas at Austin; AWS Deep Learning; Department of Electrical and Computer Engineering, University of Texas at Austin", "aff_domain": "utexas.edu;ustc.edu.cn;utexas.edu;amazon.com;utexas.edu", "email": "utexas.edu;ustc.edu.cn;utexas.edu;amazon.com;utexas.edu", "github": "https://github.com/VITA-Group/Unified-LTH-GNN", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/chen21p.html", "aff_unique_index": "0+1;2;0;1;0", "aff_unique_norm": "University of Texas at Austin;Amazon;University of Science and Technology of China", "aff_unique_dep": "Department of Electrical and Computer Engineering;AWS Deep Learning;", "aff_unique_url": "https://www.utexas.edu;https://aws.amazon.com/machine-learning/;http://www.ustc.edu.cn", "aff_unique_abbr": "UT Austin;AWS;USTC", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Austin;", "aff_country_unique_index": "0+0;1;0;0;0", "aff_country_unique": "United States;China" }, { "title": "A Value-Function-based Interior-point Method for Non-convex Bi-level Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9581", "id": "9581", "proceeding": "http://proceedings.mlr.press/v139/liu21o.html", "slides": "", "author_site": "Risheng Liu, Xuan Liu, Xiaoming Yuan, Shangzhi Zeng, Jin Zhang", "author": "Risheng Liu; Xuan Liu; Xiaoming Yuan; Shangzhi Zeng; Jin Zhang", "abstract": "Bi-level optimization model is able to capture a wide range of complex learning tasks with practical interest. Due to the witnessed efficiency in solving bi-level programs, gradient-based methods have gained popularity in the machine learning community. In this work, we propose a new gradient-based solution scheme, namely, the Bi-level Value-Function-based Interior-point Method (BVFIM). Following the main idea of the log-barrier interior-point scheme, we penalize the regularized value function of the lower level problem into the upper level objective. By further solving a sequence of differentiable unconstrained approximation problems, we consequently derive a sequential programming scheme. The numerical advantage of our scheme relies on the fact that, when gradient methods are applied to solve the approximation problem, we successfully avoid computing any expensive Hessian-vector or Jacobian-vector product. We prove the convergence without requiring any convexity assumption on either the upper level or the lower level objective. 
Experiments demonstrate the efficiency of the proposed BVFIM on non-convex bi-level problems.", "bibtex": "@InProceedings{pmlr-v139-liu21o,\n title = \t {A Value-Function-based Interior-point Method for Non-convex Bi-level Optimization},\n author = {Liu, Risheng and Liu, Xuan and Yuan, Xiaoming and Zeng, Shangzhi and Zhang, Jin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6882--6892},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21o/liu21o.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21o.html},\n abstract = \t {Bi-level optimization model is able to capture a wide range of complex learning tasks with practical interest. Due to the witnessed efficiency in solving bi-level programs, gradient-based methods have gained popularity in the machine learning community. In this work, we propose a new gradient-based solution scheme, namely, the Bi-level Value-Function-based Interior-point Method (BVFIM). Following the main idea of the log-barrier interior-point scheme, we penalize the regularized value function of the lower level problem into the upper level objective. By further solving a sequence of differentiable unconstrained approximation problems, we consequently derive a sequential programming scheme. The numerical advantage of our scheme relies on the fact that, when gradient methods are applied to solve the approximation problem, we successfully avoid computing any expensive Hessian-vector or Jacobian-vector product. We prove the convergence without requiring any convexity assumption on either the upper level or the lower level objective. 
Experiments demonstrate the efficiency of the proposed BVFIM on non-convex bi-level problems.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21o/liu21o.pdf", "supp": "", "pdf_size": 883895, "gs_citation": 89, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11609939348946281010&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "DUT-RU International School of Information Science and Engineering, Dalian University of Technology + Key Laboratory for Ubiquitous Network and Service Software of Liaoning Province + Pazhou Lab, Guangzhou; DUT-RU International School of Information Science and Engineering, Dalian University of Technology + Key Laboratory for Ubiquitous Network and Service Software of Liaoning Province; Department of Mathematics, The University of Hong Kong; Department of Mathematics, The University of Hong Kong; Department of Mathematics, Southern University of Science and Technology + National Center for Applied Mathematics Shenzhen", "aff_domain": "sustech.edu.cn; ; ; ;sustech.edu.cn", "email": "sustech.edu.cn; ; ; ;sustech.edu.cn", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/liu21o.html", "aff_unique_index": "0+1+2;0+1;3;3;4+5", "aff_unique_norm": "Dalian University of Technology;Liaoning Province Key Laboratory for Ubiquitous Network and Service Software;Pazhou Lab;University of Hong Kong;Southern University of Science and Technology;National Center for Applied Mathematics", "aff_unique_dep": "International School of Information Science and Engineering;Key Laboratory for Ubiquitous Network and Service Software;;Department of Mathematics;Department of Mathematics;", "aff_unique_url": "http://en.dlut.edu.cn/;;;https://www.hku.hk;https://www.sustech.edu.cn;", "aff_unique_abbr": "DUT;;;HKU;SUSTech;", "aff_campus_unique_index": "0+2;0;3;3;4", "aff_campus_unique": "Dalian;;Guangzhou;Hong Kong SAR;Shenzhen", "aff_country_unique_index": "0+0+0;0+0;0;0;0+0", "aff_country_unique": "China" }, { "title": "A Wasserstein Minimax Framework for Mixed Linear Regression", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8949", "id": "8949", "proceeding": "http://proceedings.mlr.press/v139/diamandis21a.html", "slides": "/media/icml-2021/Slides/8949_293AXKB.pdf", "author_site": "Theo Diamandis, Yonina Eldar, Alireza Fallah, Farzan Farnia, Asuman Ozdaglar", "author": "Theo Diamandis; Yonina Eldar; Alireza Fallah; Farzan Farnia; Asuman Ozdaglar", "abstract": "Multi-modal distributions are commonly used to model clustered data in statistical learning tasks. In this paper, we consider the Mixed Linear Regression (MLR) problem. We propose an optimal transport-based framework for MLR problems, Wasserstein Mixed Linear Regression (WMLR), which minimizes the Wasserstein distance between the learned and target mixture regression models. Through a model-based duality analysis, WMLR reduces the underlying MLR task to a nonconvex-concave minimax optimization problem, which can be provably solved to find a minimax stationary point by the Gradient Descent Ascent (GDA) algorithm. In the special case of mixtures of two linear regression models, we show that WMLR enjoys global convergence and generalization guarantees. We prove that WMLR\u2019s sample complexity grows linearly with the dimension of data. Finally, we discuss the application of WMLR to the federated learning task where the training samples are collected by multiple agents in a network. 
Unlike the Expectation-Maximization algorithm, WMLR directly extends to the distributed, federated learning setting. We support our theoretical results through several numerical experiments, which highlight our framework\u2019s ability to handle the federated learning setting with mixture models.", "bibtex": "@InProceedings{pmlr-v139-diamandis21a,\n title = \t {A Wasserstein Minimax Framework for Mixed Linear Regression},\n author = {Diamandis, Theo and Eldar, Yonina and Fallah, Alireza and Farnia, Farzan and Ozdaglar, Asuman},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2697--2706},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/diamandis21a/diamandis21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/diamandis21a.html},\n abstract = \t {Multi-modal distributions are commonly used to model clustered data in statistical learning tasks. In this paper, we consider the Mixed Linear Regression (MLR) problem. We propose an optimal transport-based framework for MLR problems, Wasserstein Mixed Linear Regression (WMLR), which minimizes the Wasserstein distance between the learned and target mixture regression models. Through a model-based duality analysis, WMLR reduces the underlying MLR task to a nonconvex-concave minimax optimization problem, which can be provably solved to find a minimax stationary point by the Gradient Descent Ascent (GDA) algorithm. In the special case of mixtures of two linear regression models, we show that WMLR enjoys global convergence and generalization guarantees. We prove that WMLR\u2019s sample complexity grows linearly with the dimension of data. Finally, we discuss the application of WMLR to the federated learning task where the training samples are collected by multiple agents in a network. Unlike the Expectation-Maximization algorithm, WMLR directly extends to the distributed, federated learning setting. 
We support our theoretical results through several numerical experiments, which highlight our framework\u2019s ability to handle the federated learning setting with mixture models.}\n}", "pdf": "http://proceedings.mlr.press/v139/diamandis21a/diamandis21a.pdf", "supp": "", "pdf_size": 536283, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3546795848288703283&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Department of Electrical Engineering & Computer Science, MIT, USA+Faculty of Math and Computer Science, Weizmann Institute of Science, Israel; Faculty of Math and Computer Science, Weizmann Institute of Science, Israel; Department of Electrical Engineering & Computer Science, MIT, USA; Department of Electrical Engineering & Computer Science, MIT, USA; Department of Electrical Engineering & Computer Science, MIT, USA", "aff_domain": "mit.edu; ;mit.edu;mit.edu; ", "email": "mit.edu; ;mit.edu;mit.edu; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/diamandis21a.html", "aff_unique_index": "0+1;1;0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology;Weizmann Institute of Science", "aff_unique_dep": "Department of Electrical Engineering & Computer Science;Faculty of Math and Computer Science", "aff_unique_url": "https://web.mit.edu;https://www.weizmann.ac.il", "aff_unique_abbr": "MIT;Weizmann", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Cambridge;", "aff_country_unique_index": "0+1;1;0;0;0", "aff_country_unique": "United States;Israel" }, { "title": "A Zeroth-Order Block Coordinate Descent Algorithm for Huge-Scale Black-Box Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9407", "id": "9407", "proceeding": "http://proceedings.mlr.press/v139/cai21d.html", "slides": "/media/icml-2021/Slides/9407.pdf", "author_site": "HanQin Cai, Yuchen Lou, Daniel Mckenzie, Wotao Yin", "author": "Hanqin Cai; Yuchen Lou; Daniel Mckenzie; Wotao Yin", "abstract": "We consider the zeroth-order optimization problem in the huge-scale setting, where the dimension of the problem is so large that performing even basic vector operations on the decision variables is infeasible. In this paper, we propose a novel algorithm, coined ZO-BCD, that exhibits favorable overall query complexity and has a much smaller per-iteration computational complexity. In addition, we discuss how the memory footprint of ZO-BCD can be reduced even further by the clever use of circulant measurement matrices. As an application of our new method, we propose the idea of crafting adversarial attacks on neural network based classifiers in a wavelet domain, which can result in problem dimensions of over one million. 
In particular, we show that crafting adversarial examples to audio classifiers in a wavelet domain can achieve the state-of-the-art attack success rate of 97.9% with significantly less distortion.", "bibtex": "@InProceedings{pmlr-v139-cai21d,\n title = \t {A Zeroth-Order Block Coordinate Descent Algorithm for Huge-Scale Black-Box Optimization},\n author = {Cai, Hanqin and Lou, Yuchen and Mckenzie, Daniel and Yin, Wotao},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1193--1203},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cai21d/cai21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/cai21d.html},\n abstract = \t {We consider the zeroth-order optimization problem in the huge-scale setting, where the dimension of the problem is so large that performing even basic vector operations on the decision variables is infeasible. In this paper, we propose a novel algorithm, coined ZO-BCD, that exhibits favorable overall query complexity and has a much smaller per-iteration computational complexity. In addition, we discuss how the memory footprint of ZO-BCD can be reduced even further by the clever use of circulant measurement matrices. As an application of our new method, we propose the idea of crafting adversarial attacks on neural network based classifiers in a wavelet domain, which can result in problem dimensions of over one million. In particular, we show that crafting adversarial examples to audio classifiers in a wavelet domain can achieve the state-of-the-art attack success rate of 97.9% with significantly less distortion.}\n}", "pdf": "http://proceedings.mlr.press/v139/cai21d/cai21d.pdf", "supp": "", "pdf_size": 2674768, "gs_citation": 54, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10394095959262689530&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/cai21d.html" }, { "title": "A large-scale benchmark for few-shot program induction and synthesis", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9205", "id": "9205", "proceeding": "http://proceedings.mlr.press/v139/alet21a.html", "slides": "", "author_site": "Ferran Alet, Javier Lopez-Contreras, James Koppel, Maxwell Nye, Armando Solar-Lezama, Tomas Lozano-Perez, Leslie Kaelbling, Josh Tenenbaum", "author": "Ferran Alet; Javier Lopez-Contreras; James Koppel; Maxwell Nye; Armando Solar-Lezama; Tomas Lozano-Perez; Leslie Kaelbling; Joshua Tenenbaum", "abstract": "A landmark challenge for AI is to learn flexible, powerful representations from small numbers of examples. On an important class of tasks, hypotheses in the form of programs provide extreme generalization capabilities from surprisingly few examples. However, whereas large natural few-shot learning image benchmarks have spurred progress in meta-learning for deep networks, there is no comparably big, natural program-synthesis dataset that can play a similar role. This is because, whereas images are relatively easy to label from internet meta-data or annotated by non-experts, generating meaningful input-output examples for program induction has proven hard to scale. 
In this work, we propose a new way of leveraging unit tests and natural inputs for small programs as meaningful input-output examples for each sub-program of the overall program. This allows us to create a large-scale naturalistic few-shot program-induction benchmark and propose new challenges in this domain. The evaluation of multiple program induction and synthesis algorithms points to shortcomings of current methods and suggests multiple avenues for future work.", "bibtex": "@InProceedings{pmlr-v139-alet21a,\n title = \t {A large-scale benchmark for few-shot program induction and synthesis},\n author = {Alet, Ferran and Lopez-Contreras, Javier and Koppel, James and Nye, Maxwell and Solar-Lezama, Armando and Lozano-Perez, Tomas and Kaelbling, Leslie and Tenenbaum, Joshua},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {175--186},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/alet21a/alet21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/alet21a.html},\n abstract = \t {A landmark challenge for AI is to learn flexible, powerful representations from small numbers of examples. On an important class of tasks, hypotheses in the form of programs provide extreme generalization capabilities from surprisingly few examples. However, whereas large natural few-shot learning image benchmarks have spurred progress in meta-learning for deep networks, there is no comparably big, natural program-synthesis dataset that can play a similar role. This is because, whereas images are relatively easy to label from internet meta-data or annotated by non-experts, generating meaningful input-output examples for program induction has proven hard to scale. In this work, we propose a new way of leveraging unit tests and natural inputs for small programs as meaningful input-output examples for each sub-program of the overall program. This allows us to create a large-scale naturalistic few-shot program-induction benchmark and propose new challenges in this domain. 
The evaluation of multiple program induction and synthesis algorithms points to shortcomings of current methods and suggests multiple avenues for future work.}\n}", "pdf": "http://proceedings.mlr.press/v139/alet21a/alet21a.pdf", "supp": "", "pdf_size": 847334, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10208261207056974832&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Massachusetts Institute of Technology; Massachusetts Institute of Technology; Massachusetts Institute of Technology; Massachusetts Institute of Technology; Massachusetts Institute of Technology; Massachusetts Institute of Technology; Massachusetts Institute of Technology; Massachusetts Institute of Technology", "aff_domain": "mit.edu;mit.edu; ; ; ; ; ; ", "email": "mit.edu;mit.edu; ; ; ; ; ; ", "github": "", "project": "https://lis.csail.mit.edu/progres", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/alet21a.html", "aff_unique_index": "0;0;0;0;0;0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "", "aff_unique_url": "https://web.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "A statistical perspective on distillation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9065", "id": "9065", "proceeding": "http://proceedings.mlr.press/v139/menon21a.html", "slides": "/media/icml-2021/Slides/9065.pdf", "author_site": "Aditya Menon, Ankit Singh Rawat, Sashank Jakkam Reddi, Seungyeon Kim, Sanjiv Kumar", "author": "Aditya K Menon; Ankit Singh Rawat; Sashank Reddi; Seungyeon Kim; Sanjiv Kumar", "abstract": "Knowledge distillation is a technique for improving a \u201cstudent\u201d model by replacing its one-hot training labels with a label distribution obtained from a \u201cteacher\u201d model. Despite its broad success, several basic questions \u2014 e.g., Why does distillation help? Why do more accurate teachers not necessarily distill better? \u2014 have received limited formal study. In this paper, we present a statistical perspective on distillation which provides an answer to these questions. Our core observation is that a \u201cBayes teacher\u201d providing the true class-probabilities can lower the variance of the student objective, and thus improve performance. We then establish a bias-variance tradeoff that quantifies the value of teachers that approximate the Bayes class-probabilities. This provides a formal criterion as to what constitutes a \u201cgood\u201d teacher, namely, the quality of its probability estimates. 
Finally, we illustrate how our statistical perspective facilitates novel applications of distillation to bipartite ranking and multiclass retrieval.", "bibtex": "@InProceedings{pmlr-v139-menon21a,\n title = \t {A statistical perspective on distillation},\n author = {Menon, Aditya K and Rawat, Ankit Singh and Reddi, Sashank and Kim, Seungyeon and Kumar, Sanjiv},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7632--7642},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/menon21a/menon21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/menon21a.html},\n abstract = \t {Knowledge distillation is a technique for improving a \u201cstudent\u201d model by replacing its one-hot training labels with a label distribution obtained from a \u201cteacher\u201d model. Despite its broad success, several basic questions \u2014 e.g., Why does distillation help? Why do more accurate teachers not necessarily distill better? \u2014 have received limited formal study. In this paper, we present a statistical perspective on distillation which provides an answer to these questions. Our core observation is that a \u201cBayes teacher\u201d providing the true class-probabilities can lower the variance of the student objective, and thus improve performance. We then establish a bias-variance tradeoff that quantifies the value of teachers that approximate the Bayes class-probabilities. This provides a formal criterion as to what constitutes a \u201cgood\u201d teacher, namely, the quality of its probability estimates. Finally, we illustrate how our statistical perspective facilitates novel applications of distillation to bipartite ranking and multiclass retrieval.}\n}", "pdf": "http://proceedings.mlr.press/v139/menon21a/menon21a.pdf", "supp": "", "pdf_size": 3316358, "gs_citation": 107, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15107394223914414897&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Google Research, New York; Google Research, New York; Google Research, New York; Google Research, New York; Google Research, New York", "aff_domain": "google.com; ; ; ; ", "email": "google.com; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/menon21a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "New York", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "A theory of high dimensional regression with arbitrary correlations between input features and target functions: sample complexity, multiple descent curves and a hierarchy of phase transitions", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10415", "id": "10415", "proceeding": "http://proceedings.mlr.press/v139/mel21a.html", "slides": "", "author_site": "Gabriel Mel, Surya Ganguli", "author": "Gabriel Mel; Surya Ganguli", "abstract": "The performance of neural networks depends on precise relationships between four distinct ingredients: the architecture, the loss function, the statistical structure of inputs, and the ground truth target function. 
Much theoretical work has focused on understanding the role of the first two ingredients under highly simplified models of random uncorrelated data and target functions. In contrast, performance likely relies on a conspiracy between the statistical structure of the input distribution and the structure of the function to be learned. To understand this better we revisit ridge regression in high dimensions, which corresponds to an exceedingly simple architecture and loss function, but we analyze its performance under arbitrary correlations between input features and the target function. We find a rich mathematical structure that includes: (1) a dramatic reduction in sample complexity when the target function aligns with data anisotropy; (2) the existence of multiple descent curves; (3) a sequence of phase transitions in the performance, loss landscape, and optimal regularization as a function of the amount of data that explains the first two effects.", "bibtex": "@InProceedings{pmlr-v139-mel21a,\n title = \t {A theory of high dimensional regression with arbitrary correlations between input features and target functions: sample complexity, multiple descent curves and a hierarchy of phase transitions},\n author = {Mel, Gabriel and Ganguli, Surya},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7578--7587},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/mel21a/mel21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/mel21a.html},\n abstract = \t {The performance of neural networks depends on precise relationships between four distinct ingredients: the architecture, the loss function, the statistical structure of inputs, and the ground truth target function. Much theoretical work has focused on understanding the role of the first two ingredients under highly simplified models of random uncorrelated data and target functions. In contrast, performance likely relies on a conspiracy between the statistical structure of the input distribution and the structure of the function to be learned. To understand this better we revisit ridge regression in high dimensions, which corresponds to an exceedingly simple architecture and loss function, but we analyze its performance under arbitrary correlations between input features and the target function. 
We find a rich mathematical structure that includes: (1) a dramatic reduction in sample complexity when the target function aligns with data anisotropy; (2) the existence of multiple descent curves; (3) a sequence of phase transitions in the performance, loss landscape, and optimal regularization as a function of the amount of data that explains the first two effects.}\n}", "pdf": "http://proceedings.mlr.press/v139/mel21a/mel21a.pdf", "supp": "", "pdf_size": 5090447, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8205161659078882591&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Neurosciences PhD Program, School of Medicine, Stanford University, CA, US; Department of Applied Physics, Stanford, CA, US", "aff_domain": "gmail.com; ", "email": "gmail.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/mel21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "School of Medicine", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;1", "aff_campus_unique": "CA;Stanford", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "ACE: Explaining cluster from an adversarial perspective", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10155", "id": "10155", "proceeding": "http://proceedings.mlr.press/v139/lu21e.html", "slides": "", "author_site": "Yang Lu, Timothy C Yu, Giancarlo Bonora, William Stafford Noble", "author": "Yang Young Lu; Timothy C Yu; Giancarlo Bonora; William Stafford Noble", "abstract": "A common workflow in single-cell RNA-seq analysis is to project the data to a latent space, cluster the cells in that space, and identify sets of marker genes that explain the differences among the discovered clusters. A primary drawback to this three-step procedure is that each step is carried out independently, thereby neglecting the effects of the nonlinear embedding and inter-gene dependencies on the selection of marker genes. Here we propose an integrated deep learning framework, Adversarial Clustering Explanation (ACE), that bundles all three steps into a single workflow. The method thus moves away from the notion of \"marker genes\" to instead identify a panel of explanatory genes. This panel may include genes that are not only enriched but also depleted relative to other cell types, as well as genes that exhibit differences between closely related cell types. 
Empirically, we demonstrate that ACE is able to identify gene panels that are both highly discriminative and nonredundant, and we demonstrate the applicability of ACE to an image recognition task.", "bibtex": "@InProceedings{pmlr-v139-lu21e,\n title = \t {ACE: Explaining cluster from an adversarial perspective},\n author = {Lu, Yang Young and Yu, Timothy C and Bonora, Giancarlo and Noble, William Stafford},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7156--7167},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lu21e/lu21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/lu21e.html},\n abstract = \t {A common workflow in single-cell RNA-seq analysis is to project the data to a latent space, cluster the cells in that space, and identify sets of marker genes that explain the differences among the discovered clusters. A primary drawback to this three-step procedure is that each step is carried out independently, thereby neglecting the effects of the nonlinear embedding and inter-gene dependencies on the selection of marker genes. Here we propose an integrated deep learning framework, Adversarial Clustering Explanation (ACE), that bundles all three steps into a single workflow. The method thus moves away from the notion of \"marker genes\" to instead identify a panel of explanatory genes. This panel may include genes that are not only enriched but also depleted relative to other cell types, as well as genes that exhibit differences between closely related cell types. Empirically, we demonstrate that ACE is able to identify gene panels that are both highly discriminative and nonredundant, and we demonstrate the applicability of ACE to an image recognition task.}\n}", "pdf": "http://proceedings.mlr.press/v139/lu21e/lu21e.pdf", "supp": "", "pdf_size": 6937554, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5383573453567982592&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "bitbucket.org/noblelab/ace", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/lu21e.html" }, { "title": "ADOM: Accelerated Decentralized Optimization Method for Time-Varying Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10125", "id": "10125", "proceeding": "http://proceedings.mlr.press/v139/kovalev21a.html", "slides": "/media/icml-2021/Slides/10125.pdf", "author_site": "Dmitry Kovalev, Egor Shulgin, Peter Richtarik, Alexander Rogozin, Alexander Gasnikov", "author": "Dmitry Kovalev; Egor Shulgin; Peter Richtarik; Alexander V Rogozin; Alexander Gasnikov", "abstract": "We propose ADOM \u2013 an accelerated method for smooth and strongly convex decentralized optimization over time-varying networks. ADOM uses a dual oracle, i.e., we assume access to the gradient of the Fenchel conjugate of the individual loss functions. Up to a constant factor, which depends on the network structure only, its communication complexity is the same as that of accelerated Nesterov gradient method. To the best of our knowledge, only the algorithm of Rogozin et al. (2019) has a convergence rate with similar properties. 
However, their algorithm converges under the very restrictive assumption that the number of network changes can not be greater than a tiny percentage of the number of iterations. This assumption is hard to satisfy in practice, as the network topology changes usually can not be controlled. In contrast, ADOM merely requires the network to stay connected throughout time.", "bibtex": "@InProceedings{pmlr-v139-kovalev21a,\n title = \t {ADOM: Accelerated Decentralized Optimization Method for Time-Varying Networks},\n author = {Kovalev, Dmitry and Shulgin, Egor and Richtarik, Peter and Rogozin, Alexander V and Gasnikov, Alexander},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5784--5793},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kovalev21a/kovalev21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kovalev21a.html},\n abstract = \t {We propose ADOM \u2013 an accelerated method for smooth and strongly convex decentralized optimization over time-varying networks. ADOM uses a dual oracle, i.e., we assume access to the gradient of the Fenchel conjugate of the individual loss functions. Up to a constant factor, which depends on the network structure only, its communication complexity is the same as that of accelerated Nesterov gradient method. To the best of our knowledge, only the algorithm of Rogozin et al. (2019) has a convergence rate with similar properties. However, their algorithm converges under the very restrictive assumption that the number of network changes can not be greater than a tiny percentage of the number of iterations. This assumption is hard to satisfy in practice, as the network topology changes usually can not be controlled. 
In contrast, ADOM merely requires the network to stay connected throughout time.}\n}", "pdf": "http://proceedings.mlr.press/v139/kovalev21a/kovalev21a.pdf", "supp": "", "pdf_size": 4753129, "gs_citation": 36, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13055424477930940340&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "King Abdullah University of Science and Technology; King Abdullah University of Science and Technology; King Abdullah University of Science and Technology; Moscow Institute of Physics and Technology + Higher School of Economics; Moscow Institute of Physics and Technology + Higher School of Economics", "aff_domain": "gmail.com; ; ; ; ", "email": "gmail.com; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/kovalev21a.html", "aff_unique_index": "0;0;0;1+2;1+2", "aff_unique_norm": "King Abdullah University of Science and Technology;Moscow Institute of Physics and Technology;Higher School of Economics", "aff_unique_dep": ";;", "aff_unique_url": "https://www.kast.kau.edu.sa;https://www.mipt.ru/en;https://www.hse.ru", "aff_unique_abbr": "KAUST;MIPT;HSE", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;1+1;1+1", "aff_country_unique": "Saudi Arabia;Russian Federation" }, { "title": "AGENT: A Benchmark for Core Psychological Reasoning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10211", "id": "10211", "proceeding": "http://proceedings.mlr.press/v139/shu21a.html", "slides": "/media/icml-2021/Slides/10211.pdf", "author_site": "Tianmin Shu, Abhishek Bhandwaldar, Chuang Gan, Kevin Smith, Shari Liu, Dan Gutfreund, Elizabeth Spelke, Josh Tenenbaum, Tomer Ullman", "author": "Tianmin Shu; Abhishek Bhandwaldar; Chuang Gan; Kevin Smith; Shari Liu; Dan Gutfreund; Elizabeth Spelke; Joshua Tenenbaum; Tomer Ullman", "abstract": "For machine agents to successfully interact with humans in real-world settings, they will need to develop an understanding of human mental life. Intuitive psychology, the ability to reason about hidden mental variables that drive observable actions, comes naturally to people: even pre-verbal infants can tell agents from objects, expecting agents to act efficiently to achieve goals given constraints. Despite recent interest in machine agents that reason about other agents, it is not clear if such agents learn or hold the core psychology principles that drive human reasoning. Inspired by cognitive development studies on intuitive psychology, we present a benchmark consisting of a large dataset of procedurally generated 3D animations, AGENT (Action, Goal, Efficiency, coNstraint, uTility), structured around four scenarios (goal preferences, action efficiency, unobserved constraints, and cost-reward trade-offs) that probe key concepts of core intuitive psychology. We validate AGENT with human-ratings, propose an evaluation protocol emphasizing generalization, and compare two strong baselines built on Bayesian inverse planning and a Theory of Mind neural network. 
Our results suggest that to pass the designed tests of core intuitive psychology at human levels, a model must acquire or have built-in representations of how agents plan, combining utility computations and core knowledge of objects and physics.", "bibtex": "@InProceedings{pmlr-v139-shu21a,\n title = \t {AGENT: A Benchmark for Core Psychological Reasoning},\n author = {Shu, Tianmin and Bhandwaldar, Abhishek and Gan, Chuang and Smith, Kevin and Liu, Shari and Gutfreund, Dan and Spelke, Elizabeth and Tenenbaum, Joshua and Ullman, Tomer},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9614--9625},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/shu21a/shu21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/shu21a.html},\n abstract = \t {For machine agents to successfully interact with humans in real-world settings, they will need to develop an understanding of human mental life. Intuitive psychology, the ability to reason about hidden mental variables that drive observable actions, comes naturally to people: even pre-verbal infants can tell agents from objects, expecting agents to act efficiently to achieve goals given constraints. Despite recent interest in machine agents that reason about other agents, it is not clear if such agents learn or hold the core psychology principles that drive human reasoning. Inspired by cognitive development studies on intuitive psychology, we present a benchmark consisting of a large dataset of procedurally generated 3D animations, AGENT (Action, Goal, Efficiency, coNstraint, uTility), structured around four scenarios (goal preferences, action efficiency, unobserved constraints, and cost-reward trade-offs) that probe key concepts of core intuitive psychology. We validate AGENT with human-ratings, propose an evaluation protocol emphasizing generalization, and compare two strong baselines built on Bayesian inverse planning and a Theory of Mind neural network. 
Our results suggest that to pass the designed tests of core intuitive psychology at human levels, a model must acquire or have built-in representations of how agents plan, combining utility computations and core knowledge of objects and physics.}\n}", "pdf": "http://proceedings.mlr.press/v139/shu21a/shu21a.pdf", "supp": "", "pdf_size": 3345707, "gs_citation": 96, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9729067071974484204&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Massachusetts Institute of Technology; MIT-IBM Watson AI Lab; MIT-IBM Watson AI Lab; Massachusetts Institute of Technology; Massachusetts Institute of Technology; MIT-IBM Watson AI Lab; Harvard University; Massachusetts Institute of Technology; Harvard University", "aff_domain": "mit.edu; ; ; ; ; ; ; ; ", "email": "mit.edu; ; ; ; ; ; ; ; ", "github": "", "project": "https://www.tshu.io/AGENT", "author_num": 9, "oa": "https://proceedings.mlr.press/v139/shu21a.html", "aff_unique_index": "0;0;0;0;0;0;1;0;1", "aff_unique_norm": "Massachusetts Institute of Technology;Harvard University", "aff_unique_dep": ";", "aff_unique_url": "https://web.mit.edu;https://www.harvard.edu", "aff_unique_abbr": "MIT;Harvard", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "APS: Active Pretraining with Successor Features", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9241", "id": "9241", "proceeding": "http://proceedings.mlr.press/v139/liu21b.html", "slides": "/media/icml-2021/Slides/9241.pdf", "author_site": "Hao Liu, Pieter Abbeel", "author": "Hao Liu; Pieter Abbeel", "abstract": "We introduce a new unsupervised pretraining objective for reinforcement learning. During the unsupervised reward-free pretraining phase, the agent maximizes mutual information between tasks and states induced by the policy. Our key contribution is a novel lower bound of this intractable quantity. We show that by reinterpreting and combining variational successor features\u00a0\\citep{Hansen2020Fast} with nonparametric entropy maximization\u00a0\\citep{liu2021behavior}, the intractable mutual information can be efficiently optimized. The proposed method Active Pretraining with Successor Feature (APS) explores the environment via nonparametric entropy maximization, and the explored data can be efficiently leveraged to learn behavior by variational successor features. APS addresses the limitations of existing mutual information maximization based and entropy maximization based unsupervised RL, and combines the best of both worlds. When evaluated on the Atari 100k data-efficiency benchmark, our approach significantly outperforms previous methods combining unsupervised pretraining with task-specific finetuning.", "bibtex": "@InProceedings{pmlr-v139-liu21b,\n title = \t {APS: Active Pretraining with Successor Features},\n author = {Liu, Hao and Abbeel, Pieter},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6736--6747},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21b/liu21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21b.html},\n abstract = \t {We introduce a new unsupervised pretraining objective for reinforcement learning. 
During the unsupervised reward-free pretraining phase, the agent maximizes mutual information between tasks and states induced by the policy. Our key contribution is a novel lower bound of this intractable quantity. We show that by reinterpreting and combining variational successor features\u00a0\\citep{Hansen2020Fast} with nonparametric entropy maximization\u00a0\\citep{liu2021behavior}, the intractable mutual information can be efficiently optimized. The proposed method Active Pretraining with Successor Feature (APS) explores the environment via nonparametric entropy maximization, and the explored data can be efficiently leveraged to learn behavior by variational successor features. APS addresses the limitations of existing mutual information maximization based and entropy maximization based unsupervised RL, and combines the best of both worlds. When evaluated on the Atari 100k data-efficiency benchmark, our approach significantly outperforms previous methods combining unsupervised pretraining with task-specific finetuning.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21b/liu21b.pdf", "supp": "", "pdf_size": 1544333, "gs_citation": 168, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16577440194793035340&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "University of California, Berkeley, CA, USA; University of California, Berkeley, CA, USA", "aff_domain": "cs.berkeley.edu; ", "email": "cs.berkeley.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/liu21b.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "ARMS: Antithetic-REINFORCE-Multi-Sample Gradient for Binary Variables", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9775", "id": "9775", "proceeding": "http://proceedings.mlr.press/v139/dimitriev21a.html", "slides": "/media/icml-2021/Slides/9775.pdf", "author_site": "Alek Dimitriev, Mingyuan Zhou", "author": "Aleksandar Dimitriev; Mingyuan Zhou", "abstract": "Estimating the gradients for binary variables is a task that arises frequently in various domains, such as training discrete latent variable models. What has been commonly used is a REINFORCE based Monte Carlo estimation method that uses either independent samples or pairs of negatively correlated samples. To better utilize more than two samples, we propose ARMS, an Antithetic REINFORCE-based Multi-Sample gradient estimator. ARMS uses a copula to generate any number of mutually antithetic samples. It is unbiased, has low variance, and generalizes both DisARM, which we show to be ARMS with two samples, and the leave-one-out REINFORCE (LOORF) estimator, which is ARMS with uncorrelated samples. We evaluate ARMS on several datasets for training generative models, and our experimental results show that it outperforms competing methods. We also develop a version of ARMS for optimizing the multi-sample variational bound, and show that it outperforms both VIMCO and DisARM. 
The code is publicly available.", "bibtex": "@InProceedings{pmlr-v139-dimitriev21a,\n title = \t {ARMS: Antithetic-REINFORCE-Multi-Sample Gradient for Binary Variables},\n author = {Dimitriev, Aleksandar and Zhou, Mingyuan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2717--2727},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/dimitriev21a/dimitriev21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/dimitriev21a.html},\n abstract = \t {Estimating the gradients for binary variables is a task that arises frequently in various domains, such as training discrete latent variable models. What has been commonly used is a REINFORCE based Monte Carlo estimation method that uses either independent samples or pairs of negatively correlated samples. To better utilize more than two samples, we propose ARMS, an Antithetic REINFORCE-based Multi-Sample gradient estimator. ARMS uses a copula to generate any number of mutually antithetic samples. It is unbiased, has low variance, and generalizes both DisARM, which we show to be ARMS with two samples, and the leave-one-out REINFORCE (LOORF) estimator, which is ARMS with uncorrelated samples. We evaluate ARMS on several datasets for training generative models, and our experimental results show that it outperforms competing methods. We also develop a version of ARMS for optimizing the multi-sample variational bound, and show that it outperforms both VIMCO and DisARM. The code is publicly available.}\n}", "pdf": "http://proceedings.mlr.press/v139/dimitriev21a/dimitriev21a.pdf", "supp": "", "pdf_size": 6424999, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=546385727654075781&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "McCombs School of Business, The University of Texas at Austin, Austin, Texas 78712, USA; McCombs School of Business, The University of Texas at Austin, Austin, Texas 78712, USA", "aff_domain": "utexas.edu;mccombs.utexas.edu", "email": "utexas.edu;mccombs.utexas.edu", "github": "https://github.com/alekdimi/arms", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/dimitriev21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Texas at Austin", "aff_unique_dep": "McCombs School of Business", "aff_unique_url": "https://www.mccombs.utexas.edu", "aff_unique_abbr": "UT Austin", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Austin", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "ASAM: Adaptive Sharpness-Aware Minimization for Scale-Invariant Learning of Deep Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9261", "id": "9261", "proceeding": "http://proceedings.mlr.press/v139/kwon21b.html", "slides": "", "author_site": "Jungmin Kwon, Jeongseop Kim, Hyunseo Park, In Kwon Choi", "author": "Jungmin Kwon; Jeongseop Kim; Hyunseo Park; In Kwon Choi", "abstract": "Recently, learning algorithms motivated from sharpness of loss surface as an effective measure of generalization gap have shown state-of-the-art performances. 
Nevertheless, sharpness defined in a rigid region with a fixed radius, has a drawback in sensitivity to parameter re-scaling which leaves the loss unaffected, leading to weakening of the connection between sharpness and generalization gap. In this paper, we introduce the concept of adaptive sharpness which is scale-invariant and propose the corresponding generalization bound. We suggest a novel learning method, adaptive sharpness-aware minimization (ASAM), utilizing the proposed generalization bound. Experimental results in various benchmark datasets show that ASAM contributes to significant improvement of model generalization performance.", "bibtex": "@InProceedings{pmlr-v139-kwon21b,\n title = \t {ASAM: Adaptive Sharpness-Aware Minimization for Scale-Invariant Learning of Deep Neural Networks},\n author = {Kwon, Jungmin and Kim, Jeongseop and Park, Hyunseo and Choi, In Kwon},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5905--5914},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kwon21b/kwon21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/kwon21b.html},\n abstract = \t {Recently, learning algorithms motivated from sharpness of loss surface as an effective measure of generalization gap have shown state-of-the-art performances. Nevertheless, sharpness defined in a rigid region with a fixed radius, has a drawback in sensitivity to parameter re-scaling which leaves the loss unaffected, leading to weakening of the connection between sharpness and generalization gap. In this paper, we introduce the concept of adaptive sharpness which is scale-invariant and propose the corresponding generalization bound. We suggest a novel learning method, adaptive sharpness-aware minimization (ASAM), utilizing the proposed generalization bound. 
Experimental results in various benchmark datasets show that ASAM contributes to significant improvement of model generalization performance.}\n}", "pdf": "http://proceedings.mlr.press/v139/kwon21b/kwon21b.pdf", "supp": "", "pdf_size": 1983798, "gs_citation": 385, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8550448363439632053&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Samsung Research; Samsung Research; Samsung Research; Samsung Research", "aff_domain": ";samsung.com; ; ", "email": ";samsung.com; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/kwon21b.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Samsung", "aff_unique_dep": "Samsung Research", "aff_unique_url": "https://research.samsung.com", "aff_unique_abbr": "Samsung", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "South Korea" }, { "title": "Accelerate CNNs from Three Dimensions: A Comprehensive Pruning Framework", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9081", "id": "9081", "proceeding": "http://proceedings.mlr.press/v139/wang21e.html", "slides": "", "author_site": "Wenxiao Wang, Minghao Chen, Shuai Zhao, Long Chen, Jinming Hu, Haifeng Liu, Deng Cai, Xiaofei He, Wei Liu", "author": "Wenxiao Wang; Minghao Chen; Shuai Zhao; Long Chen; Jinming Hu; Haifeng Liu; Deng Cai; Xiaofei He; Wei Liu", "abstract": "Most neural network pruning methods, such as filter-level and layer-level prunings, prune the network model along one dimension (depth, width, or resolution) solely to meet a computational budget. However, such a pruning policy often leads to excessive reduction of that dimension, thus inducing a huge accuracy loss. To alleviate this issue, we argue that pruning should be conducted along three dimensions comprehensively. For this purpose, our pruning framework formulates pruning as an optimization problem. Specifically, it first casts the relationships between a certain model\u2019s accuracy and depth/width/resolution into a polynomial regression and then maximizes the polynomial to acquire the optimal values for the three dimensions. Finally, the model is pruned along the three optimal dimensions accordingly. In this framework, since collecting too much data for training the regression is very time-costly, we propose two approaches to lower the cost: 1) specializing the polynomial to ensure an accurate regression even with less training data; 2) employing iterative pruning and fine-tuning to collect the data faster. 
Extensive experiments show that our proposed algorithm surpasses state-of-the-art pruning algorithms and even neural architecture search-based algorithms.", "bibtex": "@InProceedings{pmlr-v139-wang21e,\n title = \t {Accelerate CNNs from Three Dimensions: A Comprehensive Pruning Framework},\n author = {Wang, Wenxiao and Chen, Minghao and Zhao, Shuai and Chen, Long and Hu, Jinming and Liu, Haifeng and Cai, Deng and He, Xiaofei and Liu, Wei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10717--10726},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21e/wang21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21e.html},\n abstract = \t {Most neural network pruning methods, such as filter-level and layer-level prunings, prune the network model along one dimension (depth, width, or resolution) solely to meet a computational budget. However, such a pruning policy often leads to excessive reduction of that dimension, thus inducing a huge accuracy loss. To alleviate this issue, we argue that pruning should be conducted along three dimensions comprehensively. For this purpose, our pruning framework formulates pruning as an optimization problem. Specifically, it first casts the relationships between a certain model\u2019s accuracy and depth/width/resolution into a polynomial regression and then maximizes the polynomial to acquire the optimal values for the three dimensions. Finally, the model is pruned along the three optimal dimensions accordingly. In this framework, since collecting too much data for training the regression is very time-costly, we propose two approaches to lower the cost: 1) specializing the polynomial to ensure an accurate regression even with less training data; 2) employing iterative pruning and fine-tuning to collect the data faster. 
Extensive experiments show that our proposed algorithm surpasses state-of-the-art pruning algorithms and even neural architecture search-based algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21e/wang21e.pdf", "supp": "", "pdf_size": 3312563, "gs_citation": 77, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5816330779940152230&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "State Key Lab of CAD&CG, Zhejiang University, China+Tencent Data Platform, China; State Key Lab of CAD&CG, Zhejiang University, China; State Key Lab of CAD&CG, Zhejiang University, China; Columbia University, US+Tencent, China; State Key Lab of CAD&CG, Zhejiang University, China; State Key Lab of CAD&CG, Zhejiang University, China; State Key Lab of CAD&CG, Zhejiang University, China; State Key Lab of CAD&CG, Zhejiang University, China; Tencent Data Platform, China", "aff_domain": "gmail.com; ; ; ; ; ; ; ; ", "email": "gmail.com; ; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 9, "oa": "https://proceedings.mlr.press/v139/wang21e.html", "aff_unique_index": "0+1;0;0;2+1;0;0;0;0;1", "aff_unique_norm": "Zhejiang University;Tencent;Columbia University", "aff_unique_dep": "State Key Lab of CAD&CG;Tencent Data Platform;", "aff_unique_url": "http://www.zju.edu.cn;https://www.tencent.com;https://www.columbia.edu", "aff_unique_abbr": "ZJU;Tencent;Columbia", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0;1+0;0;0;0;0;0", "aff_country_unique": "China;United States" }, { "title": "Accelerated Algorithms for Smooth Convex-Concave Minimax Problems with O(1/k^2) Rate on Squared Gradient Norm", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10227", "id": "10227", "proceeding": "http://proceedings.mlr.press/v139/yoon21d.html", "slides": "", "author_site": "TaeHo Yoon, Ernest Ryu", "author": "Taeho Yoon; Ernest K Ryu", "abstract": "In this work, we study the computational complexity of reducing the squared gradient magnitude for smooth minimax optimization problems. First, we present algorithms with accelerated $\\mathcal{O}(1/k^2)$ last-iterate rates, faster than the existing $\\mathcal{O}(1/k)$ or slower rates for extragradient, Popov, and gradient descent with anchoring. The acceleration mechanism combines extragradient steps with anchoring and is distinct from Nesterov\u2019s acceleration. We then establish optimality of the $\\mathcal{O}(1/k^2)$ rate through a matching lower bound.", "bibtex": "@InProceedings{pmlr-v139-yoon21d,\n title = \t {Accelerated Algorithms for Smooth Convex-Concave Minimax Problems with O(1/k^2) Rate on Squared Gradient Norm},\n author = {Yoon, Taeho and Ryu, Ernest K},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12098--12109},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yoon21d/yoon21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/yoon21d.html},\n abstract = \t {In this work, we study the computational complexity of reducing the squared gradient magnitude for smooth minimax optimization problems. First, we present algorithms with accelerated $\\mathcal{O}(1/k^2)$ last-iterate rates, faster than the existing $\\mathcal{O}(1/k)$ or slower rates for extragradient, Popov, and gradient descent with anchoring. 
The acceleration mechanism combines extragradient steps with anchoring and is distinct from Nesterov\u2019s acceleration. We then establish optimality of the $\\mathcal{O}(1/k^2)$ rate through a matching lower bound.}\n}", "pdf": "http://proceedings.mlr.press/v139/yoon21d/yoon21d.pdf", "supp": "", "pdf_size": 3357951, "gs_citation": 137, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9457836096897231231&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 2, "aff": "Department of Mathematical Sciences, Seoul National University, Seoul, Korea; Department of Mathematical Sciences, Seoul National University, Seoul, Korea", "aff_domain": "snu.ac.kr;snu.ac.kr", "email": "snu.ac.kr;snu.ac.kr", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/yoon21d.html", "aff_unique_index": "0;0", "aff_unique_norm": "Seoul National University", "aff_unique_dep": "Department of Mathematical Sciences", "aff_unique_url": "https://www.snu.ac.kr", "aff_unique_abbr": "SNU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Seoul", "aff_country_unique_index": "0;0", "aff_country_unique": "South Korea" }, { "title": "Accelerating Feedforward Computation via Parallel Nonlinear Equation Solving", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9397", "id": "9397", "proceeding": "http://proceedings.mlr.press/v139/song21a.html", "slides": "", "author_site": "Yang Song, Chenlin Meng, Renjie Liao, Stefano Ermon", "author": "Yang Song; Chenlin Meng; Renjie Liao; Stefano Ermon", "abstract": "Feedforward computation, such as evaluating a neural network or sampling from an autoregressive model, is ubiquitous in machine learning. The sequential nature of feedforward computation, however, requires a strict order of execution and cannot be easily accelerated with parallel computing. To enable parallelization, we frame the task of feedforward computation as solving a system of nonlinear equations. We then propose to find the solution using a Jacobi or Gauss-Seidel fixed-point iteration method, as well as hybrid methods of both. Crucially, Jacobi updates operate independently on each equation and can be executed in parallel. Our method is guaranteed to give exactly the same values as the original feedforward computation with a reduced (or equal) number of parallelizable iterations, and hence reduced time given sufficient parallel computing power. Experimentally, we demonstrate the effectiveness of our approach in accelerating (i) backpropagation of RNNs, (ii) evaluation of DenseNets, and (iii) autoregressive sampling of MADE and PixelCNN++, with speedup factors between 2.1 and 26 under various settings.", "bibtex": "@InProceedings{pmlr-v139-song21a,\n title = \t {Accelerating Feedforward Computation via Parallel Nonlinear Equation Solving},\n author = {Song, Yang and Meng, Chenlin and Liao, Renjie and Ermon, Stefano},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9791--9800},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/song21a/song21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/song21a.html},\n abstract = \t {Feedforward computation, such as evaluating a neural network or sampling from an autoregressive model, is ubiquitous in machine learning. 
The sequential nature of feedforward computation, however, requires a strict order of execution and cannot be easily accelerated with parallel computing. To enable parallelization, we frame the task of feedforward computation as solving a system of nonlinear equations. We then propose to find the solution using a Jacobi or Gauss-Seidel fixed-point iteration method, as well as hybrid methods of both. Crucially, Jacobi updates operate independently on each equation and can be executed in parallel. Our method is guaranteed to give exactly the same values as the original feedforward computation with a reduced (or equal) number of parallelizable iterations, and hence reduced time given sufficient parallel computing power. Experimentally, we demonstrate the effectiveness of our approach in accelerating (i) backpropagation of RNNs, (ii) evaluation of DenseNets, and (iii) autoregressive sampling of MADE and PixelCNN++, with speedup factors between 2.1 and 26 under various settings.}\n}", "pdf": "http://proceedings.mlr.press/v139/song21a/song21a.pdf", "supp": "", "pdf_size": 2192501, "gs_citation": 34, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9587891109353811026&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Computer Science Department, Stanford University; Computer Science Department, Stanford University; Department of Computer Science, University of Toronto + Vector Institute; Computer Science Department, Stanford University", "aff_domain": "cs.stanford.edu; ; ;cs.stanford.edu", "email": "cs.stanford.edu; ; ;cs.stanford.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/song21a.html", "aff_unique_index": "0;0;1+2;0", "aff_unique_norm": "Stanford University;University of Toronto;Vector Institute", "aff_unique_dep": "Computer Science Department;Department of Computer Science;", "aff_unique_url": "https://www.stanford.edu;https://www.utoronto.ca;https://vectorinstitute.ai/", "aff_unique_abbr": "Stanford;U of T;Vector Institute", "aff_campus_unique_index": "0;0;1;0", "aff_campus_unique": "Stanford;Toronto;", "aff_country_unique_index": "0;0;1+1;0", "aff_country_unique": "United States;Canada" }, { "title": "Accelerating Gossip SGD with Periodic Global Averaging", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8735", "id": "8735", "proceeding": "http://proceedings.mlr.press/v139/chen21y.html", "slides": "", "author_site": "Yiming Chen, Kun Yuan, Yingya Zhang, Pan Pan, Yinghui Xu, Wotao Yin", "author": "Yiming Chen; Kun Yuan; Yingya Zhang; Pan Pan; Yinghui Xu; Wotao Yin", "abstract": "Communication overhead hinders the scalability of large-scale distributed training. Gossip SGD, where each node averages only with its neighbors, is more communication-efficient than the prevalent parallel SGD. However, its convergence rate is reversely proportional to quantity $1-\\beta$ which measures the network connectivity. On large and sparse networks where $1-\\beta \\to 0$, Gossip SGD requires more iterations to converge, which offsets against its communication benefit. This paper introduces Gossip-PGA, which adds Periodic Global Averaging to accelerate Gossip SGD. Its transient stage, i.e., the iterations required to reach asymptotic linear speedup stage, improves from $\\Omega(\\beta^4 n^3/(1-\\beta)^4)$ to $\\Omega(\\beta^4 n^3 H^4)$ for non-convex problems. The influence of network topology in Gossip-PGA can be controlled by the averaging period $H$. 
Its transient-stage complexity is also superior to local SGD which has order $\\Omega(n^3 H^4)$. Empirical results of large-scale training on image classification (ResNet50) and language modeling (BERT) validate our theoretical findings.", "bibtex": "@InProceedings{pmlr-v139-chen21y,\n title = \t {Accelerating Gossip SGD with Periodic Global Averaging},\n author = {Chen, Yiming and Yuan, Kun and Zhang, Yingya and Pan, Pan and Xu, Yinghui and Yin, Wotao},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1791--1802},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21y/chen21y.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21y.html},\n abstract = \t {Communication overhead hinders the scalability of large-scale distributed training. Gossip SGD, where each node averages only with its neighbors, is more communication-efficient than the prevalent parallel SGD. However, its convergence rate is reversely proportional to quantity $1-\\beta$ which measures the network connectivity. On large and sparse networks where $1-\\beta \\to 0$, Gossip SGD requires more iterations to converge, which offsets against its communication benefit. This paper introduces Gossip-PGA, which adds Periodic Global Averaging to accelerate Gossip SGD. Its transient stage, i.e., the iterations required to reach asymptotic linear speedup stage, improves from $\\Omega(\\beta^4 n^3/(1-\\beta)^4)$ to $\\Omega(\\beta^4 n^3 H^4)$ for non-convex problems. The influence of network topology in Gossip-PGA can be controlled by the averaging period $H$. Its transient-stage complexity is also superior to local SGD which has order $\\Omega(n^3 H^4)$. 
Empirical results of large-scale training on image classification (ResNet50) and language modeling (BERT) validate our theoretical findings.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21y/chen21y.pdf", "supp": "", "pdf_size": 1120529, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9948620238504720759&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Alibaba Group, Hangzhou, China; Alibaba Group, Hangzhou, China; Alibaba Group, Hangzhou, China; Alibaba Group, Hangzhou, China; Alibaba Group, Hangzhou, China; Alibaba Group, Hangzhou, China", "aff_domain": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", "email": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/chen21y.html", "aff_unique_index": "0;0;0;0;0;0", "aff_unique_norm": "Alibaba Group", "aff_unique_dep": "", "aff_unique_url": "https://www.alibaba.com", "aff_unique_abbr": "Alibaba", "aff_campus_unique_index": "0;0;0;0;0;0", "aff_campus_unique": "Hangzhou", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "China" }, { "title": "Accelerating Safe Reinforcement Learning with Constraint-mismatched Baseline Policies", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8451", "id": "8451", "proceeding": "http://proceedings.mlr.press/v139/yang21i.html", "slides": "", "author_site": "Jimmy Yang, Justinian Rosca, Karthik Narasimhan, Peter Ramadge", "author": "Tsung-Yen Yang; Justinian Rosca; Karthik Narasimhan; Peter J Ramadge", "abstract": "We consider the problem of reinforcement learning when provided with (1) a baseline control policy and (2) a set of constraints that the learner must satisfy. The baseline policy can arise from demonstration data or a teacher agent and may provide useful cues for learning, but it might also be sub-optimal for the task at hand, and is not guaranteed to satisfy the specified constraints, which might encode safety, fairness or other application-specific requirements. In order to safely learn from baseline policies, we propose an iterative policy optimization algorithm that alternates between maximizing expected return on the task, minimizing distance to the baseline policy, and projecting the policy onto the constraint-satisfying set. We analyze our algorithm theoretically and provide a finite-time convergence guarantee. 
In our experiments on five different control tasks, our algorithm consistently outperforms several state-of-the-art baselines, achieving 10 times fewer constraint violations and 40% higher reward on average.", "bibtex": "@InProceedings{pmlr-v139-yang21i,\n title = \t {Accelerating Safe Reinforcement Learning with Constraint-mismatched Baseline Policies},\n author = {Yang, Tsung-Yen and Rosca, Justinian and Narasimhan, Karthik and Ramadge, Peter J},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11795--11807},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yang21i/yang21i.pdf},\n url = \t {https://proceedings.mlr.press/v139/yang21i.html},\n abstract = \t {We consider the problem of reinforcement learning when provided with (1) a baseline control policy and (2) a set of constraints that the learner must satisfy. The baseline policy can arise from demonstration data or a teacher agent and may provide useful cues for learning, but it might also be sub-optimal for the task at hand, and is not guaranteed to satisfy the specified constraints, which might encode safety, fairness or other application-specific requirements. In order to safely learn from baseline policies, we propose an iterative policy optimization algorithm that alternates between maximizing expected return on the task, minimizing distance to the baseline policy, and projecting the policy onto the constraint-satisfying set. We analyze our algorithm theoretically and provide a finite-time convergence guarantee. In our experiments on five different control tasks, our algorithm consistently outperforms several state-of-the-art baselines, achieving 10 times fewer constraint violations and 40% higher reward on average.}\n}", "pdf": "http://proceedings.mlr.press/v139/yang21i/yang21i.pdf", "supp": "", "pdf_size": 5536162, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6249504571925712479&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Princeton University; Siemens Corporation, Corporate Technology; Princeton University; Princeton University", "aff_domain": "princeton.edu; ; ; ", "email": "princeton.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/yang21i.html", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "Princeton University;Siemens Corporation", "aff_unique_dep": ";Corporate Technology", "aff_unique_url": "https://www.princeton.edu;https://www.siemens.com", "aff_unique_abbr": "Princeton;Siemens", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0;0", "aff_country_unique": "United States;Germany" }, { "title": "Acceleration via Fractal Learning Rate Schedules", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9173", "id": "9173", "proceeding": "http://proceedings.mlr.press/v139/agarwal21a.html", "slides": "", "author_site": "Naman Agarwal, Surbhi Goel, Cyril Zhang", "author": "Naman Agarwal; Surbhi Goel; Cyril Zhang", "abstract": "In practical applications of iterative first-order optimization, the learning rate schedule remains notoriously difficult to understand and expensive to tune. We demonstrate the presence of these subtleties even in the innocuous case when the objective is a convex quadratic. 
We reinterpret an iterative algorithm from the numerical analysis literature as what we call the Chebyshev learning rate schedule for accelerating vanilla gradient descent, and show that the problem of mitigating instability leads to a fractal ordering of step sizes. We provide some experiments to challenge conventional beliefs about stable learning rates in deep learning: the fractal schedule enables training to converge with locally unstable updates which make negative progress on the objective.", "bibtex": "@InProceedings{pmlr-v139-agarwal21a,\n title = \t {Acceleration via Fractal Learning Rate Schedules},\n author = {Agarwal, Naman and Goel, Surbhi and Zhang, Cyril},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {87--99},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/agarwal21a/agarwal21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/agarwal21a.html},\n abstract = \t {In practical applications of iterative first-order optimization, the learning rate schedule remains notoriously difficult to understand and expensive to tune. We demonstrate the presence of these subtleties even in the innocuous case when the objective is a convex quadratic. We reinterpret an iterative algorithm from the numerical analysis literature as what we call the Chebyshev learning rate schedule for accelerating vanilla gradient descent, and show that the problem of mitigating instability leads to a fractal ordering of step sizes. We provide some experiments to challenge conventional beliefs about stable learning rates in deep learning: the fractal schedule enables training to converge with locally unstable updates which make negative progress on the objective.}\n}", "pdf": "http://proceedings.mlr.press/v139/agarwal21a/agarwal21a.pdf", "supp": "", "pdf_size": 640997, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10801505570034728165&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Google AI Princeton, Princeton, NJ, USA; Microsoft Research, New York, NY, USA; Microsoft Research, New York, NY, USA", "aff_domain": "microsoft.com; ; ", "email": "microsoft.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/agarwal21a.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "Google;Microsoft", "aff_unique_dep": "Google AI;Microsoft Research", "aff_unique_url": "https://ai.google;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "Google AI;MSR", "aff_campus_unique_index": "0;1;1", "aff_campus_unique": "Princeton;New York", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Accumulated Decoupled Learning with Gradient Staleness Mitigation for Convolutional Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9061", "id": "9061", "proceeding": "http://proceedings.mlr.press/v139/zhuang21a.html", "slides": "", "author_site": "Huiping Zhuang, Zhenyu Weng, Fulin Luo, Kar-Ann Toh, Haizhou Li, Zhiping Lin", "author": "Huiping Zhuang; Zhenyu Weng; Fulin Luo; Toh Kar-Ann; Haizhou Li; Zhiping Lin", "abstract": "Gradient staleness is a major side effect in decoupled learning when training convolutional neural networks asynchronously. 
Existing methods that ignore this effect might result in reduced generalization and even divergence. In this paper, we propose an accumulated decoupled learning (ADL), which includes a module-wise gradient accumulation in order to mitigate the gradient staleness. Unlike prior arts ignoring the gradient staleness, we quantify the staleness in such a way that its mitigation can be quantitatively visualized. As a new learning scheme, the proposed ADL is theoretically shown to converge to critical points in spite of its asynchronism. Extensive experiments on CIFAR-10 and ImageNet datasets are conducted, demonstrating that ADL gives promising generalization results while the state-of-the-art methods experience reduced generalization and divergence. In addition, our ADL is shown to have the fastest training speed among the compared methods.", "bibtex": "@InProceedings{pmlr-v139-zhuang21a,\n title = \t {Accumulated Decoupled Learning with Gradient Staleness Mitigation for Convolutional Neural Networks},\n author = {Zhuang, Huiping and Weng, Zhenyu and Luo, Fulin and Kar-Ann, Toh and Li, Haizhou and Lin, Zhiping},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12935--12944},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhuang21a/zhuang21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhuang21a.html},\n abstract = \t {Gradient staleness is a major side effect in decoupled learning when training convolutional neural networks asynchronously. Existing methods that ignore this effect might result in reduced generalization and even divergence. In this paper, we propose an accumulated decoupled learning (ADL), which includes a module-wise gradient accumulation in order to mitigate the gradient staleness. Unlike prior arts ignoring the gradient staleness, we quantify the staleness in such a way that its mitigation can be quantitatively visualized. As a new learning scheme, the proposed ADL is theoretically shown to converge to critical points in spite of its asynchronism. Extensive experiments on CIFAR-10 and ImageNet datasets are conducted, demonstrating that ADL gives promising generalization results while the state-of-the-art methods experience reduced generalization and divergence. 
In addition, our ADL is shown to have the fastest training speed among the compared methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhuang21a/zhuang21a.pdf", "supp": "", "pdf_size": 2010788, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5538154327204671512&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "School of Electrical and Electronic Engineering, Nanyang Technological University; School of Electrical and Electronic Engineering, Nanyang Technological University; School of Electrical and Electronic Engineering, Nanyang Technological University; Department of Electrical and Electronic Engineering, Yonsei University; School of Electrical and Computer Engineering, National University of Singapore; School of Electrical and Electronic Engineering, Nanyang Technological University", "aff_domain": "; ; ; ; ;ntu.edu.sg", "email": "; ; ; ; ;ntu.edu.sg", "github": "https://github.com/ZHUANGHP/Accumulated-Decoupled-Learning.git", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/zhuang21a.html", "aff_unique_index": "0;0;0;1;2;0", "aff_unique_norm": "Nanyang Technological University;Yonsei University;National University of Singapore", "aff_unique_dep": "School of Electrical and Electronic Engineering;Department of Electrical and Electronic Engineering;School of Electrical and Computer Engineering", "aff_unique_url": "https://www.ntu.edu.sg;https://www.yonsei.ac.kr;https://www.nus.edu.sg", "aff_unique_abbr": "NTU;Yonsei;NUS", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;1;0;0", "aff_country_unique": "Singapore;South Korea" }, { "title": "Accuracy on the Line: on the Strong Correlation Between Out-of-Distribution and In-Distribution Generalization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9243", "id": "9243", "proceeding": "http://proceedings.mlr.press/v139/miller21b.html", "slides": "", "author_site": "John Miller, Rohan Taori, Aditi Raghunathan, Shiori Sagawa, Pang Wei Koh, Vaishaal Shankar, Percy Liang, Yair Carmon, Ludwig Schmidt", "author": "John P Miller; Rohan Taori; Aditi Raghunathan; Shiori Sagawa; Pang Wei Koh; Vaishaal Shankar; Percy Liang; Yair Carmon; Ludwig Schmidt", "abstract": "For machine learning systems to be reliable, we must understand their performance in unseen, out- of-distribution environments. In this paper, we empirically show that out-of-distribution performance is strongly correlated with in-distribution performance for a wide range of models and distribution shifts. Specifically, we demonstrate strong correlations between in-distribution and out-of- distribution performance on variants of CIFAR- 10 & ImageNet, a synthetic pose estimation task derived from YCB objects, FMoW-WILDS satellite imagery classification, and wildlife classification in iWildCam-WILDS. The correlation holds across model architectures, hyperparameters, training set size, and training duration, and is more precise than what is expected from existing domain adaptation theory. To complete the picture, we also investigate cases where the correlation is weaker, for instance some synthetic distribution shifts from CIFAR-10-C and the tissue classification dataset Camelyon17-WILDS. 
Finally, we provide a candidate theory based on a Gaussian data model that shows how changes in the data covariance arising from distribution shift can affect the observed correlations.", "bibtex": "@InProceedings{pmlr-v139-miller21b,\n title = \t {Accuracy on the Line: on the Strong Correlation Between Out-of-Distribution and In-Distribution Generalization},\n author = {Miller, John P and Taori, Rohan and Raghunathan, Aditi and Sagawa, Shiori and Koh, Pang Wei and Shankar, Vaishaal and Liang, Percy and Carmon, Yair and Schmidt, Ludwig},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7721--7735},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/miller21b/miller21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/miller21b.html},\n abstract = \t {For machine learning systems to be reliable, we must understand their performance in unseen, out- of-distribution environments. In this paper, we empirically show that out-of-distribution performance is strongly correlated with in-distribution performance for a wide range of models and distribution shifts. Specifically, we demonstrate strong correlations between in-distribution and out-of- distribution performance on variants of CIFAR- 10 & ImageNet, a synthetic pose estimation task derived from YCB objects, FMoW-WILDS satellite imagery classification, and wildlife classification in iWildCam-WILDS. The correlation holds across model architectures, hyperparameters, training set size, and training duration, and is more precise than what is expected from existing domain adaptation theory. To complete the picture, we also investigate cases where the correlation is weaker, for instance some synthetic distribution shifts from CIFAR-10-C and the tissue classification dataset Camelyon17-WILDS. 
Finally, we provide a candidate theory based on a Gaussian data model that shows how changes in the data covariance arising from distribution shift can affect the observed correlations.}\n}", "pdf": "http://proceedings.mlr.press/v139/miller21b/miller21b.pdf", "supp": "", "pdf_size": 2197200, "gs_citation": 333, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10338991679501236533&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science, UC Berkeley, CA, USA; Department of Computer Science, Stanford University, Stanford, CA, USA; Department of Computer Science, Stanford University, Stanford, CA, USA; Department of Computer Science, Stanford University, Stanford, CA, USA; Department of Computer Science, Stanford University, Stanford, CA, USA; Department of Computer Science, UC Berkeley, CA, USA; Department of Computer Science, Stanford University, Stanford, CA, USA; School of Computer Science, Tel Aviv University, Tel Aviv, Israel; Toyota Research Institute, Cambridge, MA, USA", "aff_domain": "berkeley.edu; ; ; ; ; ; ; ; ", "email": "berkeley.edu; ; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 9, "oa": "https://proceedings.mlr.press/v139/miller21b.html", "aff_unique_index": "0;1;1;1;1;0;1;2;3", "aff_unique_norm": "University of California, Berkeley;Stanford University;Tel Aviv University;Toyota Research Institute", "aff_unique_dep": "Department of Computer Science;Department of Computer Science;School of Computer Science;", "aff_unique_url": "https://www.berkeley.edu;https://www.stanford.edu;https://www.tau.ac.il;https://www.tri.global", "aff_unique_abbr": "UC Berkeley;Stanford;TAU;TRI", "aff_campus_unique_index": "0;1;1;1;1;0;1;2;3", "aff_campus_unique": "Berkeley;Stanford;Tel Aviv;Cambridge", "aff_country_unique_index": "0;0;0;0;0;0;0;1;0", "aff_country_unique": "United States;Israel" }, { "title": "Accuracy, Interpretability, and Differential Privacy via Explainable Boosting", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9575", "id": "9575", "proceeding": "http://proceedings.mlr.press/v139/nori21a.html", "slides": "", "author_site": "Harsha Nori, Rich Caruana, Zhiqi Bu, Judy Hanwen Shen, Janardhan Kulkarni", "author": "Harsha Nori; Rich Caruana; Zhiqi Bu; Judy Hanwen Shen; Janardhan Kulkarni", "abstract": "We show that adding differential privacy to Explainable Boosting Machines (EBMs), a recent method for training interpretable ML models, yields state-of-the-art accuracy while protecting privacy. Our experiments on multiple classification and regression datasets show that DP-EBM models suffer surprisingly little accuracy loss even with strong differential privacy guarantees. 
In addition to high accuracy, two other benefits of applying DP to EBMs are: a) trained models provide exact global and local interpretability, which is often important in settings where differential privacy is needed; and b) the models can be edited after training without loss of privacy to correct errors which DP noise may have introduced.", "bibtex": "@InProceedings{pmlr-v139-nori21a,\n title = \t {Accuracy, Interpretability, and Differential Privacy via Explainable Boosting},\n author = {Nori, Harsha and Caruana, Rich and Bu, Zhiqi and Shen, Judy Hanwen and Kulkarni, Janardhan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8227--8237},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nori21a/nori21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/nori21a.html},\n abstract = \t {We show that adding differential privacy to Explainable Boosting Machines (EBMs), a recent method for training interpretable ML models, yields state-of-the-art accuracy while protecting privacy. Our experiments on multiple classification and regression datasets show that DP-EBM models suffer surprisingly little accuracy loss even with strong differential privacy guarantees. In addition to high accuracy, two other benefits of applying DP to EBMs are: a) trained models provide exact global and local interpretability, which is often important in settings where differential privacy is needed; and b) the models can be edited after training without loss of privacy to correct errors which DP noise may have introduced.}\n}", "pdf": "http://proceedings.mlr.press/v139/nori21a/nori21a.pdf", "supp": "", "pdf_size": 619406, "gs_citation": 54, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3909488782505274678&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Microsoft, Redmond, USA; Microsoft, Redmond, USA; University of Pennsylvania, Philadelphia, USA; Stanford University, Palo Alto, USA; Microsoft, Redmond, USA", "aff_domain": "microsoft.com; ; ; ; ", "email": "microsoft.com; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/nori21a.html", "aff_unique_index": "0;0;1;2;0", "aff_unique_norm": "Microsoft;University of Pennsylvania;Stanford University", "aff_unique_dep": "Microsoft Corporation;;", "aff_unique_url": "https://www.microsoft.com;https://www.upenn.edu;https://www.stanford.edu", "aff_unique_abbr": "Microsoft;UPenn;Stanford", "aff_campus_unique_index": "0;0;1;2;0", "aff_campus_unique": "Redmond;Philadelphia;Palo Alto", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Accurate Post Training Quantization With Small Calibration Sets", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10173", "id": "10173", "proceeding": "http://proceedings.mlr.press/v139/hubara21a.html", "slides": "", "author_site": "Itay Hubara, Yury Nahshan, Yair Hanani, Ron Banner, Daniel Soudry", "author": "Itay Hubara; Yury Nahshan; Yair Hanani; Ron Banner; Daniel Soudry", "abstract": "Lately, post-training quantization methods have gained considerable attention, as they are simple to use, and require only a small unlabeled calibration set. This small dataset cannot be used to fine-tune the model without significant over-fitting. 
Instead, these methods only use the calibration set to set the activations\u2019 dynamic ranges. However, such methods always resulted in significant accuracy degradation, when used below 8-bits (except on small datasets). Here we aim to break the 8-bit barrier. To this end, we minimize the quantization errors of each layer or block separately by optimizing its parameters over the calibration set. We empirically demonstrate that this approach is: (1) much less susceptible to over-fitting than the standard fine-tuning approaches, and can be used even on a very small calibration set; and (2) more powerful than previous methods, which only set the activations\u2019 dynamic ranges. We suggest two flavors for our method, parallel and sequential aim for a fixed and flexible bit-width allocation. For the latter, we demonstrate how to optimally allocate the bit-widths for each layer, while constraining accuracy degradation or model compression by proposing a novel integer programming formulation. Finally, we suggest model global statistics tuning, to correct biases introduced during quantization. Together, these methods yield state-of-the-art results for both vision and text models. For instance, on ResNet50, we obtain less than 1% accuracy degradation \u2014 with 4-bit weights and activations in all layers, but first and last. The suggested methods are two orders of magnitude faster than the traditional Quantize Aware Training approach used for lower than 8-bit quantization. We open-sourced our code \\textit{https://github.com/papers-submission/CalibTIP}.", "bibtex": "@InProceedings{pmlr-v139-hubara21a,\n title = \t {Accurate Post Training Quantization With Small Calibration Sets},\n author = {Hubara, Itay and Nahshan, Yury and Hanani, Yair and Banner, Ron and Soudry, Daniel},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4466--4475},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hubara21a/hubara21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hubara21a.html},\n abstract = \t {Lately, post-training quantization methods have gained considerable attention, as they are simple to use, and require only a small unlabeled calibration set. This small dataset cannot be used to fine-tune the model without significant over-fitting. Instead, these methods only use the calibration set to set the activations\u2019 dynamic ranges. However, such methods always resulted in significant accuracy degradation, when used below 8-bits (except on small datasets). Here we aim to break the 8-bit barrier. To this end, we minimize the quantization errors of each layer or block separately by optimizing its parameters over the calibration set. We empirically demonstrate that this approach is: (1) much less susceptible to over-fitting than the standard fine-tuning approaches, and can be used even on a very small calibration set; and (2) more powerful than previous methods, which only set the activations\u2019 dynamic ranges. We suggest two flavors for our method, parallel and sequential aim for a fixed and flexible bit-width allocation. For the latter, we demonstrate how to optimally allocate the bit-widths for each layer, while constraining accuracy degradation or model compression by proposing a novel integer programming formulation. 
Finally, we suggest model global statistics tuning, to correct biases introduced during quantization. Together, these methods yield state-of-the-art results for both vision and text models. For instance, on ResNet50, we obtain less than 1% accuracy degradation \u2014 with 4-bit weights and activations in all layers, but first and last. The suggested methods are two orders of magnitude faster than the traditional Quantize Aware Training approach used for lower than 8-bit quantization. We open-sourced our code \\textit{https://github.com/papers-submission/CalibTIP}.}\n}", "pdf": "http://proceedings.mlr.press/v139/hubara21a/hubara21a.pdf", "supp": "", "pdf_size": 626334, "gs_citation": 212, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15280969788096136248&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Habana Labs \u2013 An Intel company, Caesarea, Israel+Department of Electrical Engineering - Technion, Haifa, Israel; Habana Labs \u2013 An Intel company, Caesarea, Israel; Habana Labs \u2013 An Intel company, Caesarea, Israel; Habana Labs \u2013 An Intel company, Caesarea, Israel; Department of Electrical Engineering - Technion, Haifa, Israel", "aff_domain": "gmail.com; ; ; ; ", "email": "gmail.com; ; ; ; ", "github": "https://github.com/papers-submission/CalibTIP", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/hubara21a.html", "aff_unique_index": "0+1;0;0;0;1", "aff_unique_norm": "Habana Labs;Technion - Israel Institute of Technology", "aff_unique_dep": ";Department of Electrical Engineering", "aff_unique_url": "https://www.habana.ai;https://www.technion.ac.il", "aff_unique_abbr": "Habana Labs;Technion", "aff_campus_unique_index": "0+1;0;0;0;1", "aff_campus_unique": "Caesarea;Haifa", "aff_country_unique_index": "0+0;0;0;0;0", "aff_country_unique": "Israel" }, { "title": "Achieving Near Instance-Optimality and Minimax-Optimality in Stochastic and Adversarial Linear Bandits Simultaneously", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9819", "id": "9819", "proceeding": "http://proceedings.mlr.press/v139/lee21h.html", "slides": "", "author_site": "Chung-Wei Lee, Haipeng Luo, Chen-Yu Wei, Mengxiao Zhang, Xiaojin Zhang", "author": "Chung-Wei Lee; Haipeng Luo; Chen-Yu Wei; Mengxiao Zhang; Xiaojin Zhang", "abstract": "In this work, we develop linear bandit algorithms that automatically adapt to different environments. By plugging a novel loss estimator into the optimization problem that characterizes the instance-optimal strategy, our first algorithm not only achieves nearly instance-optimal regret in stochastic environments, but also works in corrupted environments with additional regret being the amount of corruption, while the state-of-the-art (Li et al., 2019) achieves neither instance-optimality nor the optimal dependence on the corruption amount. Moreover, by equipping this algorithm with an adversarial component and carefully-designed testings, our second algorithm additionally enjoys minimax-optimal regret in completely adversarial environments, which is the first of this kind to our knowledge. 
Finally, all our guarantees hold with high probability, while existing instance-optimal guarantees only hold in expectation.", "bibtex": "@InProceedings{pmlr-v139-lee21h,\n title = \t {Achieving Near Instance-Optimality and Minimax-Optimality in Stochastic and Adversarial Linear Bandits Simultaneously},\n author = {Lee, Chung-Wei and Luo, Haipeng and Wei, Chen-Yu and Zhang, Mengxiao and Zhang, Xiaojin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6142--6151},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lee21h/lee21h.pdf},\n url = \t {https://proceedings.mlr.press/v139/lee21h.html},\n abstract = \t {In this work, we develop linear bandit algorithms that automatically adapt to different environments. By plugging a novel loss estimator into the optimization problem that characterizes the instance-optimal strategy, our first algorithm not only achieves nearly instance-optimal regret in stochastic environments, but also works in corrupted environments with additional regret being the amount of corruption, while the state-of-the-art (Li et al., 2019) achieves neither instance-optimality nor the optimal dependence on the corruption amount. Moreover, by equipping this algorithm with an adversarial component and carefully-designed testings, our second algorithm additionally enjoys minimax-optimal regret in completely adversarial environments, which is the first of this kind to our knowledge. Finally, all our guarantees hold with high probability, while existing instance-optimal guarantees only hold in expectation.}\n}", "pdf": "http://proceedings.mlr.press/v139/lee21h/lee21h.pdf", "supp": "", "pdf_size": 444504, "gs_citation": 53, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7827171225650801686&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "University of Southern California; University of Southern California; University of Southern California; University of Southern California; The Chinese University of Hong Kong", "aff_domain": "usc.edu; ; ; ; ", "email": "usc.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/lee21h.html", "aff_unique_index": "0;0;0;0;1", "aff_unique_norm": "University of Southern California;Chinese University of Hong Kong", "aff_unique_dep": ";", "aff_unique_url": "https://www.usc.edu;https://www.cuhk.edu.hk", "aff_unique_abbr": "USC;CUHK", "aff_campus_unique_index": "0;0;0;0;1", "aff_campus_unique": "Los Angeles;Hong Kong SAR", "aff_country_unique_index": "0;0;0;0;1", "aff_country_unique": "United States;China" }, { "title": "ActNN: Reducing Training Memory Footprint via 2-Bit Activation Compressed Training", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9443", "id": "9443", "proceeding": "http://proceedings.mlr.press/v139/chen21z.html", "slides": "/media/icml-2021/Slides/9443.pdf", "author_site": "Jianfei Chen, Lianmin Zheng, Zhewei Yao, Dequan Wang, Ion Stoica, Michael Mahoney, Joseph E Gonzalez", "author": "Jianfei Chen; Lianmin Zheng; Zhewei Yao; Dequan Wang; Ion Stoica; Michael Mahoney; Joseph Gonzalez", "abstract": "The increasing size of neural network models has been critical for improvements in their accuracy, but device memory is not growing at the same rate. 
This creates fundamental challenges for training neural networks within limited memory environments. In this work, we propose ActNN, a memory-efficient training framework that stores randomly quantized activations for back propagation. We prove the convergence of ActNN for general network architectures, and we characterize the impact of quantization on the convergence via an exact expression for the gradient variance. Using our theory, we propose novel mixed-precision quantization strategies that exploit the activation\u2019s heterogeneity across feature dimensions, samples, and layers. These techniques can be readily applied to existing dynamic graph frameworks, such as PyTorch, simply by substituting the layers. We evaluate ActNN on mainstream computer vision models for classification, detection, and segmentation tasks. On all these tasks, ActNN compresses the activation to 2 bits on average, with negligible accuracy loss. ActNN reduces the memory footprint of the activation by 12x, and it enables training with a 6.6x to 14x larger batch size.", "bibtex": "@InProceedings{pmlr-v139-chen21z,\n title = \t {ActNN: Reducing Training Memory Footprint via 2-Bit Activation Compressed Training},\n author = {Chen, Jianfei and Zheng, Lianmin and Yao, Zhewei and Wang, Dequan and Stoica, Ion and Mahoney, Michael and Gonzalez, Joseph},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1803--1813},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21z/chen21z.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21z.html},\n abstract = \t {The increasing size of neural network models has been critical for improvements in their accuracy, but device memory is not growing at the same rate. This creates fundamental challenges for training neural networks within limited memory environments. In this work, we propose ActNN, a memory-efficient training framework that stores randomly quantized activations for back propagation. We prove the convergence of ActNN for general network architectures, and we characterize the impact of quantization on the convergence via an exact expression for the gradient variance. Using our theory, we propose novel mixed-precision quantization strategies that exploit the activation\u2019s heterogeneity across feature dimensions, samples, and layers. These techniques can be readily applied to existing dynamic graph frameworks, such as PyTorch, simply by substituting the layers. We evaluate ActNN on mainstream computer vision models for classification, detection, and segmentation tasks. On all these tasks, ActNN compresses the activation to 2 bits on average, with negligible accuracy loss. 
ActNN reduces the memory footprint of the activation by 12x, and it enables training with a 6.6x to 14x larger batch size.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21z/chen21z.pdf", "supp": "", "pdf_size": 1612101, "gs_citation": 87, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3861965596155884920&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "UC Berkeley; UC Berkeley; UC Berkeley; UC Berkeley; UC Berkeley; UC Berkeley; UC Berkeley", "aff_domain": "berkeley.edu;berkeley.edu; ; ; ; ; ", "email": "berkeley.edu;berkeley.edu; ; ; ; ; ", "github": "https://github.com/ucbrise/actnn", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/chen21z.html", "aff_unique_index": "0;0;0;0;0;0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0;0;0;0;0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Actionable Models: Unsupervised Offline Reinforcement Learning of Robotic Skills", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9743", "id": "9743", "proceeding": "http://proceedings.mlr.press/v139/chebotar21a.html", "slides": "/media/icml-2021/Slides/9743.pdf", "author_site": "Yevgen Chebotar, Karol Hausman, Yao Lu, Ted Xiao, Dmitry Kalashnikov, Jacob Varley, Alexander Irpan, Benjamin Eysenbach, Ryan C Julian, Chelsea Finn, Sergey Levine", "author": "Yevgen Chebotar; Karol Hausman; Yao Lu; Ted Xiao; Dmitry Kalashnikov; Jacob Varley; Alex Irpan; Benjamin Eysenbach; Ryan C Julian; Chelsea Finn; Sergey Levine", "abstract": "We consider the problem of learning useful robotic skills from previously collected offline data without access to manually specified rewards or additional online exploration, a setting that is becoming increasingly important for scaling robot learning by reusing past robotic data. In particular, we propose the objective of learning a functional understanding of the environment by learning to reach any goal state in a given dataset. We employ goal-conditioned Q-learning with hindsight relabeling and develop several techniques that enable training in a particularly challenging offline setting. We find that our method can operate on high-dimensional camera images and learn a variety of skills on real robots that generalize to previously unseen scenes and objects. 
We also show that our method can learn to reach long-horizon goals across multiple episodes through goal chaining, and learn rich representations that can help with downstream tasks through pre-training or auxiliary objectives.", "bibtex": "@InProceedings{pmlr-v139-chebotar21a,\n title = \t {Actionable Models: Unsupervised Offline Reinforcement Learning of Robotic Skills},\n author = {Chebotar, Yevgen and Hausman, Karol and Lu, Yao and Xiao, Ted and Kalashnikov, Dmitry and Varley, Jacob and Irpan, Alex and Eysenbach, Benjamin and Julian, Ryan C and Finn, Chelsea and Levine, Sergey},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1518--1528},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chebotar21a/chebotar21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/chebotar21a.html},\n abstract = \t {We consider the problem of learning useful robotic skills from previously collected offline data without access to manually specified rewards or additional online exploration, a setting that is becoming increasingly important for scaling robot learning by reusing past robotic data. In particular, we propose the objective of learning a functional understanding of the environment by learning to reach any goal state in a given dataset. We employ goal-conditioned Q-learning with hindsight relabeling and develop several techniques that enable training in a particularly challenging offline setting. We find that our method can operate on high-dimensional camera images and learn a variety of skills on real robots that generalize to previously unseen scenes and objects. 
We also show that our method can learn to reach long-horizon goals across multiple episodes through goal chaining, and learn rich representations that can help with downstream tasks through pre-training or auxiliary objectives.}\n}", "pdf": "http://proceedings.mlr.press/v139/chebotar21a/chebotar21a.pdf", "supp": "", "pdf_size": 7286457, "gs_citation": 171, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15580684339400797315&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Robotics at Google; Robotics at Google; Robotics at Google; Robotics at Google; Robotics at Google; Robotics at Google; Robotics at Google; Robotics at Google + Carnegie Mellon University; Robotics at Google + University of Southern California; Robotics at Google + Stanford University; Robotics at Google + UC Berkeley", "aff_domain": "google.com; ; ; ; ; ; ; ; ; ; ", "email": "google.com; ; ; ; ; ; ; ; ; ; ", "github": "", "project": "https://actionable-models.github.io", "author_num": 11, "oa": "https://proceedings.mlr.press/v139/chebotar21a.html", "aff_unique_index": "0;0;0;0;0;0;0;0+1;0+2;0+3;0+4", "aff_unique_norm": "Google;Carnegie Mellon University;University of Southern California;Stanford University;University of California, Berkeley", "aff_unique_dep": "Robotics;;;;", "aff_unique_url": "https://www.google.com;https://www.cmu.edu;https://www.usc.edu;https://www.stanford.edu;https://www.berkeley.edu", "aff_unique_abbr": "Google Robotics;CMU;USC;Stanford;UC Berkeley", "aff_campus_unique_index": "0;0;0;0;0;0;0;0;0+2;0+3;0+4", "aff_campus_unique": "Mountain View;;Los Angeles;Stanford;Berkeley", "aff_country_unique_index": "0;0;0;0;0;0;0;0+0;0+0;0+0;0+0", "aff_country_unique": "United States" }, { "title": "Active Covering", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10019", "id": "10019", "proceeding": "http://proceedings.mlr.press/v139/jiang21i.html", "slides": "", "author_site": "Heinrich Jiang, Afshin Rostamizadeh", "author": "Heinrich Jiang; Afshin Rostamizadeh", "abstract": "We analyze the problem of active covering, where the learner is given an unlabeled dataset and can sequentially label query examples. The objective is to label query all of the positive examples in the fewest number of total label queries. We show under standard non-parametric assumptions that a classical support estimator can be repurposed as an offline algorithm attaining an excess query cost of $\\widetilde{\\Theta}(n^{D/(D+1)})$ compared to the optimal learner, where $n$ is the number of datapoints and $D$ is the dimension. We then provide a simple active learning method that attains an improved excess query cost of $\\widetilde{O}(n^{(D-1)/D})$. Furthermore, the proposed algorithms only require access to the positive labeled examples, which in certain settings provides additional computational and privacy benefits. 
Finally, we show that the active learning method consistently outperforms offline methods as well as a variety of baselines on a wide range of benchmark image-based datasets.", "bibtex": "@InProceedings{pmlr-v139-jiang21i,\n title = \t {Active Covering},\n author = {Jiang, Heinrich and Rostamizadeh, Afshin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5013--5022},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jiang21i/jiang21i.pdf},\n url = \t {https://proceedings.mlr.press/v139/jiang21i.html},\n abstract = \t {We analyze the problem of active covering, where the learner is given an unlabeled dataset and can sequentially label query examples. The objective is to label query all of the positive examples in the fewest number of total label queries. We show under standard non-parametric assumptions that a classical support estimator can be repurposed as an offline algorithm attaining an excess query cost of $\\widetilde{\\Theta}(n^{D/(D+1)})$ compared to the optimal learner, where $n$ is the number of datapoints and $D$ is the dimension. We then provide a simple active learning method that attains an improved excess query cost of $\\widetilde{O}(n^{(D-1)/D})$. Furthermore, the proposed algorithms only require access to the positive labeled examples, which in certain settings provides additional computational and privacy benefits. Finally, we show that the active learning method consistently outperforms offline methods as well as a variety of baselines on a wide range of benchmark image-based datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/jiang21i/jiang21i.pdf", "supp": "", "pdf_size": 544044, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15441065274451715871&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Google Research; Google Research", "aff_domain": "google.com; ", "email": "google.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/jiang21i.html", "aff_unique_index": "0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google Research", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Active Deep Probabilistic Subsampling", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10593", "id": "10593", "proceeding": "http://proceedings.mlr.press/v139/van-gorp21a.html", "slides": "", "author_site": "Hans van Gorp, Iris Huijben, Bastiaan Veeling, Nicola Pezzotti, Ruud J. G. van Sloun", "author": "Hans Van Gorp; Iris Huijben; Bastiaan S Veeling; Nicola Pezzotti; Ruud J. G. Van Sloun", "abstract": "Subsampling a signal of interest can reduce costly data transfer, battery drain, radiation exposure and acquisition time in a wide range of problems. The recently proposed Deep Probabilistic Subsampling (DPS) method effectively integrates subsampling in an end-to-end deep learning model, but learns a static pattern for all datapoints. We generalize DPS to a sequential method that actively picks the next sample based on the information acquired so far; dubbed Active-DPS (A-DPS). 
We validate that A-DPS improves over DPS for MNIST classification at high subsampling rates. Moreover, we demonstrate strong performance in active acquisition Magnetic Resonance Image (MRI) reconstruction, outperforming DPS and other deep learning methods.", "bibtex": "@InProceedings{pmlr-v139-van-gorp21a,\n title = \t {Active Deep Probabilistic Subsampling},\n author = {Van Gorp, Hans and Huijben, Iris and Veeling, Bastiaan S and Pezzotti, Nicola and Van Sloun, Ruud J. G.},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10509--10518},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/van-gorp21a/van-gorp21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/van-gorp21a.html},\n abstract = \t {Subsampling a signal of interest can reduce costly data transfer, battery drain, radiation exposure and acquisition time in a wide range of problems. The recently proposed Deep Probabilistic Subsampling (DPS) method effectively integrates subsampling in an end-to-end deep learning model, but learns a static pattern for all datapoints. We generalize DPS to a sequential method that actively picks the next sample based on the information acquired so far; dubbed Active-DPS (A-DPS). We validate that A-DPS improves over DPS for MNIST classification at high subsampling rates. Moreover, we demonstrate strong performance in active acquisition Magnetic Resonance Image (MRI) reconstruction, outperforming DPS and other deep learning methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/van-gorp21a/van-gorp21a.pdf", "supp": "", "pdf_size": 1358768, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17221435126218428736&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Electrical Engineering, Eindhoven University of Technology; Department of Electrical Engineering, Eindhoven University of Technology; Department of Computer Science, University of Amsterdam; Department of Computer Science, Eindhoven University of Technology + Philips Research; Department of Electrical Engineering, Eindhoven University of Technology + Philips Research", "aff_domain": "tue.nl; ; ; ; ", "email": "tue.nl; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/van-gorp21a.html", "aff_unique_index": "0;0;1;0+2;0+2", "aff_unique_norm": "Eindhoven University of Technology;University of Amsterdam;Philips Research", "aff_unique_dep": "Department of Electrical Engineering;Department of Computer Science;", "aff_unique_url": "https://www.tue.nl;https://www.uva.nl;https://www.philips.com/research", "aff_unique_abbr": "TU/e;UvA;Philips Research", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Eindhoven;", "aff_country_unique_index": "0;0;0;0+0;0+0", "aff_country_unique": "Netherlands" }, { "title": "Active Feature Acquisition with Generative Surrogate Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10533", "id": "10533", "proceeding": "http://proceedings.mlr.press/v139/li21p.html", "slides": "/media/icml-2021/Slides/10533.pdf", "author_site": "Yang Li, Junier Oliva", "author": "Yang Li; Junier Oliva", "abstract": "Many real-world situations allow for the acquisition of additional relevant information when making an assessment with limited or 
uncertain data. However, traditional ML approaches either require all features to be acquired beforehand or regard part of them as missing data that cannot be acquired. In this work, we consider models that perform active feature acquisition (AFA) and query the environment for unobserved features to improve the prediction assessments at evaluation time. Our work reformulates the Markov decision process (MDP) that underlies the AFA problem as a generative modeling task and optimizes a policy via a novel model-based approach. We propose learning a generative surrogate model (GSM) that captures the dependencies among input features to assess potential information gain from acquisitions. The GSM is leveraged to provide intermediate rewards and auxiliary information to aid the agent navigate a complicated high-dimensional action space and sparse rewards. Furthermore, we extend AFA in a task we coin active instance recognition (AIR) for the unsupervised case where the target variables are the unobserved features themselves and the goal is to collect information for a particular instance in a cost-efficient way. Empirical results demonstrate that our approach achieves considerably better performance than previous state of the art methods on both supervised and unsupervised tasks.", "bibtex": "@InProceedings{pmlr-v139-li21p,\n title = \t {Active Feature Acquisition with Generative Surrogate Models},\n author = {Li, Yang and Oliva, Junier},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6450--6459},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21p/li21p.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21p.html},\n abstract = \t {Many real-world situations allow for the acquisition of additional relevant information when making an assessment with limited or uncertain data. However, traditional ML approaches either require all features to be acquired beforehand or regard part of them as missing data that cannot be acquired. In this work, we consider models that perform active feature acquisition (AFA) and query the environment for unobserved features to improve the prediction assessments at evaluation time. Our work reformulates the Markov decision process (MDP) that underlies the AFA problem as a generative modeling task and optimizes a policy via a novel model-based approach. We propose learning a generative surrogate model (GSM) that captures the dependencies among input features to assess potential information gain from acquisitions. The GSM is leveraged to provide intermediate rewards and auxiliary information to aid the agent navigate a complicated high-dimensional action space and sparse rewards. Furthermore, we extend AFA in a task we coin active instance recognition (AIR) for the unsupervised case where the target variables are the unobserved features themselves and the goal is to collect information for a particular instance in a cost-efficient way. 
Empirical results demonstrate that our approach achieves considerably better performance than previous state of the art methods on both supervised and unsupervised tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21p/li21p.pdf", "supp": "", "pdf_size": 1758046, "gs_citation": 53, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15824781851708101221&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, University of North Carolina at Chapel Hill, Chapel Hill, NC, USA; Department of Computer Science, University of North Carolina at Chapel Hill, Chapel Hill, NC, USA", "aff_domain": "cs.unc.edu;cs.unc.edu", "email": "cs.unc.edu;cs.unc.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/li21p.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of North Carolina at Chapel Hill", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.unc.edu", "aff_unique_abbr": "UNC Chapel Hill", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Chapel Hill", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Active Learning for Distributionally Robust Level-Set Estimation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8729", "id": "8729", "proceeding": "http://proceedings.mlr.press/v139/inatsu21a.html", "slides": "", "author_site": "Yu Inatsu, Shogo Iwazaki, Ichiro Takeuchi", "author": "Yu Inatsu; Shogo Iwazaki; Ichiro Takeuchi", "abstract": "Many cases exist in which a black-box function $f$ with high evaluation cost depends on two types of variables $\\bm x$ and $\\bm w$, where $\\bm x$ is a controllable \\emph{design} variable and $\\bm w$ are uncontrollable \\emph{environmental} variables that have random variation following a certain distribution $P$. In such cases, an important task is to find the range of design variables $\\bm x$ such that the function $f(\\bm x, \\bm w)$ has the desired properties by incorporating the random variation of the environmental variables $\\bm w$. A natural measure of robustness is the probability that $f(\\bm x, \\bm w)$ exceeds a given threshold $h$, which is known as the \\emph{probability threshold robustness} (PTR) measure in the literature on robust optimization. However, this robustness measure cannot be correctly evaluated when the distribution $P$ is unknown. In this study, we addressed this problem by considering the \\textit{distributionally robust PTR} (DRPTR) measure, which considers the worst-case PTR within given candidate distributions. Specifically, we studied the problem of efficiently identifying a reliable set $H$, which is defined as a region in which the DRPTR measure exceeds a certain desired probability $\\alpha$, which can be interpreted as a level set estimation (LSE) problem for DRPTR. We propose a theoretically grounded and computationally efficient active learning method for this problem. 
We show that the proposed method has theoretical guarantees on convergence and accuracy, and confirmed through numerical experiments that the proposed method outperforms existing methods.", "bibtex": "@InProceedings{pmlr-v139-inatsu21a,\n title = \t {Active Learning for Distributionally Robust Level-Set Estimation},\n author = {Inatsu, Yu and Iwazaki, Shogo and Takeuchi, Ichiro},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4574--4584},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/inatsu21a/inatsu21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/inatsu21a.html},\n abstract = \t {Many cases exist in which a black-box function $f$ with high evaluation cost depends on two types of variables $\\bm x$ and $\\bm w$, where $\\bm x$ is a controllable \\emph{design} variable and $\\bm w$ are uncontrollable \\emph{environmental} variables that have random variation following a certain distribution $P$. In such cases, an important task is to find the range of design variables $\\bm x$ such that the function $f(\\bm x, \\bm w)$ has the desired properties by incorporating the random variation of the environmental variables $\\bm w$. A natural measure of robustness is the probability that $f(\\bm x, \\bm w)$ exceeds a given threshold $h$, which is known as the \\emph{probability threshold robustness} (PTR) measure in the literature on robust optimization. However, this robustness measure cannot be correctly evaluated when the distribution $P$ is unknown. In this study, we addressed this problem by considering the \\textit{distributionally robust PTR} (DRPTR) measure, which considers the worst-case PTR within given candidate distributions. Specifically, we studied the problem of efficiently identifying a reliable set $H$, which is defined as a region in which the DRPTR measure exceeds a certain desired probability $\\alpha$, which can be interpreted as a level set estimation (LSE) problem for DRPTR. We propose a theoretically grounded and computationally efficient active learning method for this problem. 
We show that the proposed method has theoretical guarantees on convergence and accuracy, and confirmed through numerical experiments that the proposed method outperforms existing methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/inatsu21a/inatsu21a.pdf", "supp": "", "pdf_size": 498052, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14765975944116564958&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, Nagoya Institute of Technology, Aichi, Japan+RIKEN Center for Advanced Intelligence Project, Tokyo, Japan; Department of Computer Science, Nagoya Institute of Technology, Aichi, Japan+RIKEN Center for Advanced Intelligence Project, Tokyo, Japan; Department of Computer Science, Nagoya Institute of Technology, Aichi, Japan+RIKEN Center for Advanced Intelligence Project, Tokyo, Japan", "aff_domain": "nitech.ac.jp;nitech.ac.jp;nitech.ac.jp", "email": "nitech.ac.jp;nitech.ac.jp;nitech.ac.jp", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/inatsu21a.html", "aff_unique_index": "0+1;0+1;0+1", "aff_unique_norm": "Nagoya Institute of Technology;RIKEN Center for Advanced Intelligence Project", "aff_unique_dep": "Department of Computer Science;Center for Advanced Intelligence Project", "aff_unique_url": "https://www.nitech.ac.jp;https://www.riken.jp/en/c-aip/", "aff_unique_abbr": "NIT;RIKEN C-AIP", "aff_campus_unique_index": "0+1;0+1;0+1", "aff_campus_unique": "Nagoya;Tokyo", "aff_country_unique_index": "0+0;0+0;0+0", "aff_country_unique": "Japan" }, { "title": "Active Learning of Continuous-time Bayesian Networks through Interventions", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9647", "id": "9647", "proceeding": "http://proceedings.mlr.press/v139/linzner21a.html", "slides": "/media/icml-2021/Slides/9647.pdf", "author_site": "Dominik Linzner, Heinz Koeppl", "author": "Dominik Linzner; Heinz Koeppl", "abstract": "We consider the problem of learning structures and parameters of Continuous-time Bayesian Networks (CTBNs) from time-course data under minimal experimental resources. In practice, the cost of generating experimental data poses a bottleneck, especially in the natural and social sciences. A popular approach to overcome this is Bayesian optimal experimental design (BOED). However, BOED becomes infeasible in high-dimensional settings, as it involves integration over all possible experimental outcomes. We propose a novel criterion for experimental design based on a variational approximation of the expected information gain. We show that for CTBNs, a semi-analytical expression for this criterion can be calculated for structure and parameter learning. By doing so, we can replace sampling over experimental outcomes by solving the CTBNs master-equation, for which scalable approximations exist. This alleviates the computational burden of sampling possible experimental outcomes in high-dimensions. We employ this framework to recommend interventional sequences. In this context, we extend the CTBN model to conditional CTBNs to incorporate interventions. 
We demonstrate the performance of our criterion on synthetic and real-world data.", "bibtex": "@InProceedings{pmlr-v139-linzner21a,\n title = \t {Active Learning of Continuous-time Bayesian Networks through Interventions},\n author = {Linzner, Dominik and Koeppl, Heinz},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6692--6701},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/linzner21a/linzner21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/linzner21a.html},\n abstract = \t {We consider the problem of learning structures and parameters of Continuous-time Bayesian Networks (CTBNs) from time-course data under minimal experimental resources. In practice, the cost of generating experimental data poses a bottleneck, especially in the natural and social sciences. A popular approach to overcome this is Bayesian optimal experimental design (BOED). However, BOED becomes infeasible in high-dimensional settings, as it involves integration over all possible experimental outcomes. We propose a novel criterion for experimental design based on a variational approximation of the expected information gain. We show that for CTBNs, a semi-analytical expression for this criterion can be calculated for structure and parameter learning. By doing so, we can replace sampling over experimental outcomes by solving the CTBNs master-equation, for which scalable approximations exist. This alleviates the computational burden of sampling possible experimental outcomes in high-dimensions. We employ this framework to recommend interventional sequences. In this context, we extend the CTBN model to conditional CTBNs to incorporate interventions. 
We demonstrate the performance of our criterion on synthetic and real-world data.}\n}", "pdf": "http://proceedings.mlr.press/v139/linzner21a/linzner21a.pdf", "supp": "", "pdf_size": 1272883, "gs_citation": 2, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8376059398200178998&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Department of Engineering and Information Technology, TU Darmstadt, Germany+The Why Company GmbH, Berlin, Germany; Department of Engineering and Information Technology, TU Darmstadt, Germany+Department of Biology, TU Darmstadt, Germany", "aff_domain": "gmail.com;bcs.tu-darmstadt.de", "email": "gmail.com;bcs.tu-darmstadt.de", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/linzner21a.html", "aff_unique_index": "0+1;0+0", "aff_unique_norm": "Technische Universit\u00e4t Darmstadt;Why Company", "aff_unique_dep": "Department of Engineering and Information Technology;", "aff_unique_url": "https://www.tu-darmstadt.de;", "aff_unique_abbr": "TU Darmstadt;", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "Germany" }, { "title": "Active Slices for Sliced Stein Discrepancy", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10539", "id": "10539", "proceeding": "http://proceedings.mlr.press/v139/gong21a.html", "slides": "", "author_site": "Wenbo Gong, Kaibo Zhang, Yingzhen Li, Jose Miguel Hernandez-Lobato", "author": "Wenbo Gong; Kaibo Zhang; Yingzhen Li; Jose Miguel Hernandez-Lobato", "abstract": "Sliced Stein discrepancy (SSD) and its kernelized variants have demonstrated promising successes in goodness-of-fit tests and model learning in high dimensions. Despite the theoretical elegance, their empirical performance depends crucially on the search of the optimal slicing directions to discriminate between two distributions. Unfortunately, previous gradient-based optimisation approach returns sub-optimal results for the slicing directions: it is computationally expensive, sensitive to initialization, and it lacks theoretical guarantee for convergence. We address these issues in two steps. First, we show in theory that the requirement of using optimal slicing directions in the kernelized version of SSD can be relaxed, validating the resulting discrepancy with finite random slicing directions. Second, given that good slicing directions are crucial for practical performance, we propose a fast algorithm for finding good slicing directions based on ideas of active sub-space construction and spectral decomposition. Experiments in goodness-of-fit tests and model learning show that our approach achieves both the best performance and the fastest convergence. 
Especially, we demonstrate 14-80x speed-up in goodness-of-fit tests when compared with the gradient-based approach.", "bibtex": "@InProceedings{pmlr-v139-gong21a,\n title = \t {Active Slices for Sliced Stein Discrepancy},\n author = {Gong, Wenbo and Zhang, Kaibo and Li, Yingzhen and Hernandez-Lobato, Jose Miguel},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3766--3776},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gong21a/gong21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/gong21a.html},\n abstract = \t {Sliced Stein discrepancy (SSD) and its kernelized variants have demonstrated promising successes in goodness-of-fit tests and model learning in high dimensions. Despite the theoretical elegance, their empirical performance depends crucially on the search of the optimal slicing directions to discriminate between two distributions. Unfortunately, previous gradient-based optimisation approach returns sub-optimal results for the slicing directions: it is computationally expensive, sensitive to initialization, and it lacks theoretical guarantee for convergence. We address these issues in two steps. First, we show in theory that the requirement of using optimal slicing directions in the kernelized version of SSD can be relaxed, validating the resulting discrepancy with finite random slicing directions. Second, given that good slicing directions are crucial for practical performance, we propose a fast algorithm for finding good slicing directions based on ideas of active sub-space construction and spectral decomposition. Experiments in goodness-of-fit tests and model learning show that our approach achieves both the best performance and the fastest convergence. 
Especially, we demonstrate 14-80x speed-up in goodness-of-fit tests when compared with the gradient-based approach.}\n}", "pdf": "http://proceedings.mlr.press/v139/gong21a/gong21a.pdf", "supp": "", "pdf_size": 3214592, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9280564173167932948&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Engineering, University of Cambridge, Cambridge, United Kingdom; Department of Engineering, University of Cambridge, Cambridge, United Kingdom; Department of Computing, Imperial College London, London, United Kingdom; Department of Engineering, University of Cambridge, Cambridge, United Kingdom", "aff_domain": "cam.ac.uk; ;imperial.ac.uk;cam.ac.uk", "email": "cam.ac.uk; ;imperial.ac.uk;cam.ac.uk", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/gong21a.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "University of Cambridge;Imperial College London", "aff_unique_dep": "Department of Engineering;Department of Computing", "aff_unique_url": "https://www.cam.ac.uk;https://www.imperial.ac.uk", "aff_unique_abbr": "Cambridge;Imperial College", "aff_campus_unique_index": "0;0;1;0", "aff_campus_unique": "Cambridge;London", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Active Testing: Sample-Efficient Model Evaluation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9293", "id": "9293", "proceeding": "http://proceedings.mlr.press/v139/kossen21a.html", "slides": "/media/icml-2021/Slides/9293_Kc9iMtj.pdf", "author_site": "Jannik Kossen, Sebastian Farquhar, Yarin Gal, Tom Rainforth", "author": "Jannik Kossen; Sebastian Farquhar; Yarin Gal; Tom Rainforth", "abstract": "We introduce a new framework for sample-efficient model evaluation that we call active testing. While approaches like active learning reduce the number of labels needed for model training, existing literature largely ignores the cost of labeling test data, typically unrealistically assuming large test sets for model evaluation. This creates a disconnect to real applications, where test labels are important and just as expensive, e.g. for optimizing hyperparameters. Active testing addresses this by carefully selecting the test points to label, ensuring model evaluation is sample-efficient. To this end, we derive theoretically-grounded and intuitive acquisition strategies that are specifically tailored to the goals of active testing, noting these are distinct to those of active learning. As actively selecting labels introduces a bias; we further show how to remove this bias while reducing the variance of the estimator at the same time. Active testing is easy to implement and can be applied to any supervised machine learning method. 
We demonstrate its effectiveness on models including WideResNets and Gaussian processes on datasets including Fashion-MNIST and CIFAR-100.", "bibtex": "@InProceedings{pmlr-v139-kossen21a,\n title = \t {Active Testing: Sample-Efficient Model Evaluation},\n author = {Kossen, Jannik and Farquhar, Sebastian and Gal, Yarin and Rainforth, Tom},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5753--5763},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kossen21a/kossen21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kossen21a.html},\n abstract = \t {We introduce a new framework for sample-efficient model evaluation that we call active testing. While approaches like active learning reduce the number of labels needed for model training, existing literature largely ignores the cost of labeling test data, typically unrealistically assuming large test sets for model evaluation. This creates a disconnect to real applications, where test labels are important and just as expensive, e.g. for optimizing hyperparameters. Active testing addresses this by carefully selecting the test points to label, ensuring model evaluation is sample-efficient. To this end, we derive theoretically-grounded and intuitive acquisition strategies that are specifically tailored to the goals of active testing, noting these are distinct to those of active learning. As actively selecting labels introduces a bias; we further show how to remove this bias while reducing the variance of the estimator at the same time. Active testing is easy to implement and can be applied to any supervised machine learning method. 
We demonstrate its effectiveness on models including WideResNets and Gaussian processes on datasets including Fashion-MNIST and CIFAR-100.}\n}", "pdf": "http://proceedings.mlr.press/v139/kossen21a/kossen21a.pdf", "supp": "", "pdf_size": 700417, "gs_citation": 71, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9561072418583325722&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "OATML, Department of Computer Science; OATML, Department of Computer Science; OATML, Department of Computer Science; Department of Statistics, Oxford", "aff_domain": "cs.ox.ac.uk; ; ; ", "email": "cs.ox.ac.uk; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/kossen21a.html", "aff_unique_index": "0;0;0;1", "aff_unique_norm": "Oxford Advanced Technologies Limited;University of Oxford", "aff_unique_dep": "Department of Computer Science;Department of Statistics", "aff_unique_url": "https://www.oatml.co.uk;https://www.ox.ac.uk", "aff_unique_abbr": "OATML;Oxford", "aff_campus_unique_index": "1", "aff_campus_unique": ";Oxford", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "AdaXpert: Adapting Neural Architecture for Growing Data", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8621", "id": "8621", "proceeding": "http://proceedings.mlr.press/v139/niu21a.html", "slides": "", "author_site": "Shuaicheng Niu, Jiaxiang Wu, Guanghui Xu, Yifan Zhang, Yong Guo, Peilin Zhao, Peng Wang, Mingkui Tan", "author": "Shuaicheng Niu; Jiaxiang Wu; Guanghui Xu; Yifan Zhang; Yong Guo; Peilin Zhao; Peng Wang; Mingkui Tan", "abstract": "In real-world applications, data often come in a growing manner, where the data volume and the number of classes may increase dynamically. This will bring a critical challenge for learning: given the increasing data volume or the number of classes, one has to instantaneously adjust the neural model capacity to obtain promising performance. Existing methods either ignore the growing nature of data or seek to independently search an optimal architecture for a given dataset, and thus are incapable of promptly adjusting the architectures for the changed data. To address this, we present a neural architecture adaptation method, namely Adaptation eXpert (AdaXpert), to efficiently adjust previous architectures on the growing data. Specifically, we introduce an architecture adjuster to generate a suitable architecture for each data snapshot, based on the previous architecture and the different extent between current and previous data distributions. Furthermore, we propose an adaptation condition to determine the necessity of adjustment, thereby avoiding unnecessary and time-consuming adjustments. 
Extensive experiments on two growth scenarios (increasing data volume and number of classes) demonstrate the effectiveness of the proposed method.", "bibtex": "@InProceedings{pmlr-v139-niu21a,\n title = \t {AdaXpert: Adapting Neural Architecture for Growing Data},\n author = {Niu, Shuaicheng and Wu, Jiaxiang and Xu, Guanghui and Zhang, Yifan and Guo, Yong and Zhao, Peilin and Wang, Peng and Tan, Mingkui},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8184--8194},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/niu21a/niu21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/niu21a.html},\n abstract = \t {In real-world applications, data often come in a growing manner, where the data volume and the number of classes may increase dynamically. This will bring a critical challenge for learning: given the increasing data volume or the number of classes, one has to instantaneously adjust the neural model capacity to obtain promising performance. Existing methods either ignore the growing nature of data or seek to independently search an optimal architecture for a given dataset, and thus are incapable of promptly adjusting the architectures for the changed data. To address this, we present a neural architecture adaptation method, namely Adaptation eXpert (AdaXpert), to efficiently adjust previous architectures on the growing data. Specifically, we introduce an architecture adjuster to generate a suitable architecture for each data snapshot, based on the previous architecture and the different extent between current and previous data distributions. Furthermore, we propose an adaptation condition to determine the necessity of adjustment, thereby avoiding unnecessary and time-consuming adjustments. 
Extensive experiments on two growth scenarios (increasing data volume and number of classes) demonstrate the effectiveness of the proposed method.}\n}", "pdf": "http://proceedings.mlr.press/v139/niu21a/niu21a.pdf", "supp": "", "pdf_size": 914764, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1668694704547918132&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "School of Software Engineering, South China University of Technology, China+Key Laboratory of Big Data and Intelligent Robot, Ministry of Education, China; Tencent AI Lab, China; School of Software Engineering, South China University of Technology, China; National University of Singapore, Singapore; School of Software Engineering, South China University of Technology, China; Tencent AI Lab, China; Northwestern Polytechnical University, China; School of Software Engineering, South China University of Technology, China+Pazhou Laboratory, China", "aff_domain": "tencent.com;tencent.com;scut.edu.cn;u.nus.edu;scut.edu.cn;tencent.com;nwpu.edu.cn;scut.edu.cn", "email": "tencent.com;tencent.com;scut.edu.cn;u.nus.edu;scut.edu.cn;tencent.com;nwpu.edu.cn;scut.edu.cn", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/niu21a.html", "aff_unique_index": "0+1;2;0;3;0;2;4;0+5", "aff_unique_norm": "South China University of Technology;Key Laboratory of Big Data and Intelligent Robot;Tencent;National University of Singapore;Northwestern Polytechnical University;Pazhou Laboratory", "aff_unique_dep": "School of Software Engineering;Ministry of Education;Tencent AI Lab;;;", "aff_unique_url": "https://www.scut.edu.cn;;https://ai.tencent.com;https://www.nus.edu.sg;http://www.nwpu.edu.cn;", "aff_unique_abbr": "SCUT;;Tencent AI Lab;NUS;NWPU;", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0;1;0;0;0;0+0", "aff_country_unique": "China;Singapore" }, { "title": "Adapting to Delays and Data in Adversarial Multi-Armed Bandits", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9277", "id": "9277", "proceeding": "http://proceedings.mlr.press/v139/gyorgy21a.html", "slides": "/media/icml-2021/Slides/9277.pdf", "author_site": "Andr\u00e1s Gy\u00f6rgy, Pooria Joulani", "author": "Andras Gyorgy; Pooria Joulani", "abstract": "We consider the adversarial multi-armed bandit problem under delayed feedback. We analyze variants of the Exp3 algorithm that tune their step size using only information (about the losses and delays) available at the time of the decisions, and obtain regret guarantees that adapt to the observed (rather than the worst-case) sequences of delays and/or losses. First, through a remarkably simple proof technique, we show that with proper tuning of the step size, the algorithm achieves an optimal (up to logarithmic factors) regret of order $\\sqrt{\\log(K)(TK + D)}$ both in expectation and in high probability, where $K$ is the number of arms, $T$ is the time horizon, and $D$ is the cumulative delay. The high-probability version of the bound, which is the first high-probability delay-adaptive bound in the literature, crucially depends on the use of implicit exploration in estimating the losses. 
Then, following Zimmert and Seldin (2019), we extend these results so that the algorithm can \u201cskip\u201d rounds with large delays, resulting in regret bounds of order $\\sqrt{TK\\log(K)} + |R| + \\sqrt{D_{\\bar{R}}\\log(K)}$, where $R$ is an arbitrary set of rounds (which are skipped) and $D_{\\bar{R}}$ is the cumulative delay of the feedback for other rounds. Finally, we present another, data-adaptive (AdaGrad-style) version of the algorithm for which the regret adapts to the observed (delayed) losses instead of only adapting to the cumulative delay (this algorithm requires an a priori upper bound on the maximum delay, or the advance knowledge of the delay for each decision when it is made). The resulting bound can be orders of magnitude smaller on benign problems, and it can be shown that the delay only affects the regret through the loss of the best arm.", "bibtex": "@InProceedings{pmlr-v139-gyorgy21a,\n title = \t {Adapting to Delays and Data in Adversarial Multi-Armed Bandits},\n author = {Gyorgy, Andras and Joulani, Pooria},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3988--3997},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gyorgy21a/gyorgy21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/gyorgy21a.html},\n abstract = \t {We consider the adversarial multi-armed bandit problem under delayed feedback. We analyze variants of the Exp3 algorithm that tune their step size using only information (about the losses and delays) available at the time of the decisions, and obtain regret guarantees that adapt to the observed (rather than the worst-case) sequences of delays and/or losses. First, through a remarkably simple proof technique, we show that with proper tuning of the step size, the algorithm achieves an optimal (up to logarithmic factors) regret of order $\\sqrt{\\log(K)(TK + D)}$ both in expectation and in high probability, where $K$ is the number of arms, $T$ is the time horizon, and $D$ is the cumulative delay. The high-probability version of the bound, which is the first high-probability delay-adaptive bound in the literature, crucially depends on the use of implicit exploration in estimating the losses. Then, following Zimmert and Seldin (2019), we extend these results so that the algorithm can \u201cskip\u201d rounds with large delays, resulting in regret bounds of order $\\sqrt{TK\\log(K)} + |R| + \\sqrt{D_{\\bar{R}}\\log(K)}$, where $R$ is an arbitrary set of rounds (which are skipped) and $D_{\\bar{R}}$ is the cumulative delay of the feedback for other rounds. Finally, we present another, data-adaptive (AdaGrad-style) version of the algorithm for which the regret adapts to the observed (delayed) losses instead of only adapting to the cumulative delay (this algorithm requires an a priori upper bound on the maximum delay, or the advance knowledge of the delay for each decision when it is made). 
The resulting bound can be orders of magnitude smaller on benign problems, and it can be shown that the delay only affects the regret through the loss of the best arm.}\n}", "pdf": "http://proceedings.mlr.press/v139/gyorgy21a/gyorgy21a.pdf", "supp": "", "pdf_size": 340605, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6207729046668057874&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "DeepMind, London, UK; DeepMind, London, UK", "aff_domain": "deepmind.com;deepmind.com", "email": "deepmind.com;deepmind.com", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/gyorgy21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "title": "Adapting to misspecification in contextual bandits with offline regression oracles", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10169", "id": "10169", "proceeding": "http://proceedings.mlr.press/v139/krishnamurthy21a.html", "slides": "", "author_site": "Sanath Kumar Krishnamurthy, Vitor Hadad, Susan Athey", "author": "Sanath Kumar Krishnamurthy; Vitor Hadad; Susan Athey", "abstract": "Computationally efficient contextual bandits are often based on estimating a predictive model of rewards given contexts and arms using past data. However, when the reward model is not well-specified, the bandit algorithm may incur unexpected regret, so recent work has focused on algorithms that are robust to misspecification. We propose a simple family of contextual bandit algorithms that adapt to misspecification error by reverting to a good safe policy when there is evidence that misspecification is causing a regret increase. Our algorithm requires only an offline regression oracle to ensure regret guarantees that gracefully degrade in terms of a measure of the average misspecification level. Compared to prior work, we attain similar regret guarantees, but we do not rely on a master algorithm, and do not require more robust oracles like online or constrained regression oracles (e.g., Foster et al. (2020), Krishnamurthy et al. (2020)). This allows us to design algorithms for more general function approximation classes.", "bibtex": "@InProceedings{pmlr-v139-krishnamurthy21a,\n title = \t {Adapting to misspecification in contextual bandits with offline regression oracles},\n author = {Krishnamurthy, Sanath Kumar and Hadad, Vitor and Athey, Susan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5805--5814},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/krishnamurthy21a/krishnamurthy21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/krishnamurthy21a.html},\n abstract = \t {Computationally efficient contextual bandits are often based on estimating a predictive model of rewards given contexts and arms using past data. However, when the reward model is not well-specified, the bandit algorithm may incur unexpected regret, so recent work has focused on algorithms that are robust to misspecification. 
We propose a simple family of contextual bandit algorithms that adapt to misspecification error by reverting to a good safe policy when there is evidence that misspecification is causing a regret increase. Our algorithm requires only an offline regression oracle to ensure regret guarantees that gracefully degrade in terms of a measure of the average misspecification level. Compared to prior work, we attain similar regret guarantees, but we do not rely on a master algorithm, and do not require more robust oracles like online or constrained regression oracles (e.g., Foster et al. (2020), Krishnamurthy et al. (2020)). This allows us to design algorithms for more general function approximation classes.}\n}", "pdf": "http://proceedings.mlr.press/v139/krishnamurthy21a/krishnamurthy21a.pdf", "supp": "", "pdf_size": 404728, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15817262721751494012&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Management Science and Engineering, Stanford University; Graduate School of Business, Stanford University; Graduate School of Business, Stanford University", "aff_domain": "stanford.edu; ; ", "email": "stanford.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/krishnamurthy21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Management Science and Engineering", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Adaptive Newton Sketch: Linear-time Optimization with Quadratic Convergence and Effective Hessian Dimensionality", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9571", "id": "9571", "proceeding": "http://proceedings.mlr.press/v139/lacotte21a.html", "slides": "/media/icml-2021/Slides/9571.pdf", "author_site": "Jonathan Lacotte, Yifei Wang, Mert Pilanci", "author": "Jonathan Lacotte; Yifei Wang; Mert Pilanci", "abstract": "We propose a randomized algorithm with quadratic convergence rate for convex optimization problems with a self-concordant, composite, strongly convex objective function. Our method is based on performing an approximate Newton step using a random projection of the Hessian. Our first contribution is to show that, at each iteration, the embedding dimension (or sketch size) can be as small as the effective dimension of the Hessian matrix. Leveraging this novel fundamental result, we design an algorithm with a sketch size proportional to the effective dimension and which exhibits a quadratic rate of convergence. This result dramatically improves on the classical linear-quadratic convergence rates of state-of-the-art sub-sampled Newton methods. However, in most practical cases, the effective dimension is not known beforehand, and this raises the question of how to pick a sketch size as small as the effective dimension while preserving a quadratic convergence rate. Our second and main contribution is thus to propose an adaptive sketch size algorithm with quadratic convergence rate and which does not require prior knowledge or estimation of the effective dimension: at each iteration, it starts with a small sketch size, and increases it until quadratic progress is achieved. 
Importantly, we show that the embedding dimension remains proportional to the effective dimension throughout the entire path and that our method achieves state-of-the-art computational complexity for solving convex optimization programs with a strongly convex component. We discuss and illustrate applications to linear and quadratic programming, as well as logistic regression and other generalized linear models.", "bibtex": "@InProceedings{pmlr-v139-lacotte21a,\n title = \t {Adaptive Newton Sketch: Linear-time Optimization with Quadratic Convergence and Effective Hessian Dimensionality},\n author = {Lacotte, Jonathan and Wang, Yifei and Pilanci, Mert},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5926--5936},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lacotte21a/lacotte21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lacotte21a.html},\n abstract = \t {We propose a randomized algorithm with quadratic convergence rate for convex optimization problems with a self-concordant, composite, strongly convex objective function. Our method is based on performing an approximate Newton step using a random projection of the Hessian. Our first contribution is to show that, at each iteration, the embedding dimension (or sketch size) can be as small as the effective dimension of the Hessian matrix. Leveraging this novel fundamental result, we design an algorithm with a sketch size proportional to the effective dimension and which exhibits a quadratic rate of convergence. This result dramatically improves on the classical linear-quadratic convergence rates of state-of-the-art sub-sampled Newton methods. However, in most practical cases, the effective dimension is not known beforehand, and this raises the question of how to pick a sketch size as small as the effective dimension while preserving a quadratic convergence rate. Our second and main contribution is thus to propose an adaptive sketch size algorithm with quadratic convergence rate and which does not require prior knowledge or estimation of the effective dimension: at each iteration, it starts with a small sketch size, and increases it until quadratic progress is achieved. Importantly, we show that the embedding dimension remains proportional to the effective dimension throughout the entire path and that our method achieves state-of-the-art computational complexity for solving convex optimization programs with a strongly convex component. 
We discuss and illustrate applications to linear and quadratic programming, as well as logistic regression and other generalized linear models.}\n}", "pdf": "http://proceedings.mlr.press/v139/lacotte21a/lacotte21a.pdf", "supp": "", "pdf_size": 1243752, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2089430765192569624&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Electrical Engineering, Stanford University; Department of Electrical Engineering, Stanford University; Department of Electrical Engineering, Stanford University", "aff_domain": "stanford.edu;stanford.edu; ", "email": "stanford.edu;stanford.edu; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/lacotte21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Electrical Engineering", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Adaptive Sampling for Best Policy Identification in Markov Decision Processes", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9627", "id": "9627", "proceeding": "http://proceedings.mlr.press/v139/marjani21a.html", "slides": "/media/icml-2021/Slides/9627.pdf", "author_site": "Aymen Al Marjani, Alexandre Proutiere", "author": "Aymen Al Marjani; Alexandre Proutiere", "abstract": "We investigate the problem of best-policy identification in discounted Markov Decision Processes (MDPs) when the learner has access to a generative model. The objective is to devise a learning algorithm returning the best policy as early as possible. We first derive a problem-specific lower bound of the sample complexity satisfied by any learning algorithm. This lower bound corresponds to an optimal sample allocation that solves a non-convex program, and hence, is hard to exploit in the design of efficient algorithms. We then provide a simple and tight upper bound of the sample complexity lower bound, whose corresponding nearly-optimal sample allocation becomes explicit. The upper bound depends on specific functionals of the MDP such as the sub-optimality gaps and the variance of the next-state value function, and thus really captures the hardness of the MDP. Finally, we devise KLB-TS (KL Ball Track-and-Stop), an algorithm tracking this nearly-optimal allocation, and provide asymptotic guarantees for its sample complexity (both almost surely and in expectation). 
The advantages of KLB-TS against state-of-the-art algorithms are discussed and illustrated numerically.", "bibtex": "@InProceedings{pmlr-v139-marjani21a,\n title = \t {Adaptive Sampling for Best Policy Identification in Markov Decision Processes},\n author = {Marjani, Aymen Al and Proutiere, Alexandre},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7459--7468},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/marjani21a/marjani21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/marjani21a.html},\n abstract = \t {We investigate the problem of best-policy identification in discounted Markov Decision Processes (MDPs) when the learner has access to a generative model. The objective is to devise a learning algorithm returning the best policy as early as possible. We first derive a problem-specific lower bound of the sample complexity satisfied by any learning algorithm. This lower bound corresponds to an optimal sample allocation that solves a non-convex program, and hence, is hard to exploit in the design of efficient algorithms. We then provide a simple and tight upper bound of the sample complexity lower bound, whose corresponding nearly-optimal sample allocation becomes explicit. The upper bound depends on specific functionals of the MDP such as the sub-optimality gaps and the variance of the next-state value function, and thus really captures the hardness of the MDP. Finally, we devise KLB-TS (KL Ball Track-and-Stop), an algorithm tracking this nearly-optimal allocation, and provide asymptotic guarantees for its sample complexity (both almost surely and in expectation). 
The advantages of KLB-TS against state-of-the-art algorithms are discussed and illustrated numerically.}\n}", "pdf": "http://proceedings.mlr.press/v139/marjani21a/marjani21a.pdf", "supp": "", "pdf_size": 438532, "gs_citation": 34, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3806569734861828486&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "UMPA, ENS Lyon + KTH Royal Institute of Technology; KTH Royal Institute of Technology", "aff_domain": "ens-lyon.fr; ", "email": "ens-lyon.fr; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/marjani21a.html", "aff_unique_index": "0+1;1", "aff_unique_norm": "\u00c9cole Normale Sup\u00e9rieure de Lyon;KTH Royal Institute of Technology", "aff_unique_dep": "UMPA (Unit\u00e9 de Math\u00e9matiques Pures et Appliqu\u00e9es);", "aff_unique_url": "https://www.ens-lyon.fr;https://www.kth.se", "aff_unique_abbr": "ENS Lyon;KTH", "aff_campus_unique_index": "0", "aff_campus_unique": "Lyon;", "aff_country_unique_index": "0+1;1", "aff_country_unique": "France;Sweden" }, { "title": "Additive Error Guarantees for Weighted Low Rank Approximation", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9981", "id": "9981", "proceeding": "http://proceedings.mlr.press/v139/bhaskara21a.html", "slides": "", "author_site": "Aditya Bhaskara, Aravinda Kanchana Ruwanpathirana, Pruthuvi Maheshakya Wijewardena", "author": "Aditya Bhaskara; Aravinda Kanchana Ruwanpathirana; Maheshakya Wijewardena", "abstract": "Low-rank approximation is a classic tool in data analysis, where the goal is to approximate a matrix $A$ with a low-rank matrix $L$ so as to minimize the error $\\norm{A - L}_F^2$. However in many applications, approximating some entries is more important than others, which leads to the weighted low rank approximation problem. However, the addition of weights makes the low-rank approximation problem intractable. Thus many works have obtained efficient algorithms under additional structural assumptions on the weight matrix (such as low rank, and appropriate block structure). We study a natural greedy algorithm for weighted low rank approximation and develop a simple condition under which it yields bi-criteria approximation up to a small additive factor in the error. The algorithm involves iteratively computing the top singular vector of an appropriately varying matrix, and is thus easy to implement at scale. Our methods also allow us to study the problem of low rank approximation under $\\ell_p$ norm error.", "bibtex": "@InProceedings{pmlr-v139-bhaskara21a,\n title = \t {Additive Error Guarantees for Weighted Low Rank Approximation},\n author = {Bhaskara, Aditya and Ruwanpathirana, Aravinda Kanchana and Wijewardena, Maheshakya},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {874--883},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bhaskara21a/bhaskara21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bhaskara21a.html},\n abstract = \t {Low-rank approximation is a classic tool in data analysis, where the goal is to approximate a matrix $A$ with a low-rank matrix $L$ so as to minimize the error $\\norm{A - L}_F^2$. 
However in many applications, approximating some entries is more important than others, which leads to the weighted low rank approximation problem. However, the addition of weights makes the low-rank approximation problem intractable. Thus many works have obtained efficient algorithms under additional structural assumptions on the weight matrix (such as low rank, and appropriate block structure). We study a natural greedy algorithm for weighted low rank approximation and develop a simple condition under which it yields bi-criteria approximation up to a small additive factor in the error. The algorithm involves iteratively computing the top singular vector of an appropriately varying matrix, and is thus easy to implement at scale. Our methods also allow us to study the problem of low rank approximation under $\\ell_p$ norm error.}\n}", "pdf": "http://proceedings.mlr.press/v139/bhaskara21a/bhaskara21a.pdf", "supp": "", "pdf_size": 2680166, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14733358090706938165&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "School of Computing, University of Utah, Salt Lake City, Utah, USA; School of Computing, University of Utah, Salt Lake City, Utah, USA; School of Computing, University of Utah, Salt Lake City, Utah, USA", "aff_domain": "gmail.com; ; ", "email": "gmail.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/bhaskara21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Utah", "aff_unique_dep": "School of Computing", "aff_unique_url": "https://www.utah.edu", "aff_unique_abbr": "U of U", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Salt Lake City", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Addressing Catastrophic Forgetting in Few-Shot Problems", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8817", "id": "8817", "proceeding": "http://proceedings.mlr.press/v139/yap21a.html", "slides": "/media/icml-2021/Slides/8817.pdf", "author_site": "Pauching Yap, Hippolyt Ritter, David Barber", "author": "Pauching Yap; Hippolyt Ritter; David Barber", "abstract": "Neural networks are known to suffer from catastrophic forgetting when trained on sequential datasets. While there have been numerous attempts to solve this problem in large-scale supervised classification, little has been done to overcome catastrophic forgetting in few-shot classification problems. We demonstrate that the popular gradient-based model-agnostic meta-learning algorithm (MAML) indeed suffers from catastrophic forgetting and introduce a Bayesian online meta-learning framework that tackles this problem. Our framework utilises Bayesian online learning and meta-learning along with Laplace approximation and variational inference to overcome catastrophic forgetting in few-shot classification problems. The experimental evaluations demonstrate that our framework can effectively achieve this goal in comparison with various baselines. 
As an additional utility, we also demonstrate empirically that our framework is capable of meta-learning on sequentially arriving few-shot tasks from a stationary task distribution.", "bibtex": "@InProceedings{pmlr-v139-yap21a,\n title = \t {Addressing Catastrophic Forgetting in Few-Shot Problems},\n author = {Yap, Pauching and Ritter, Hippolyt and Barber, David},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11909--11919},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yap21a/yap21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yap21a.html},\n abstract = \t {Neural networks are known to suffer from catastrophic forgetting when trained on sequential datasets. While there have been numerous attempts to solve this problem in large-scale supervised classification, little has been done to overcome catastrophic forgetting in few-shot classification problems. We demonstrate that the popular gradient-based model-agnostic meta-learning algorithm (MAML) indeed suffers from catastrophic forgetting and introduce a Bayesian online meta-learning framework that tackles this problem. Our framework utilises Bayesian online learning and meta-learning along with Laplace approximation and variational inference to overcome catastrophic forgetting in few-shot classification problems. The experimental evaluations demonstrate that our framework can effectively achieve this goal in comparison with various baselines. As an additional utility, we also demonstrate empirically that our framework is capable of meta-learning on sequentially arriving few-shot tasks from a stationary task distribution.}\n}", "pdf": "http://proceedings.mlr.press/v139/yap21a/yap21a.pdf", "supp": "", "pdf_size": 3967970, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5331519649661500119&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, University College London, London, United Kingdom+Alan Turing Institute, London, United Kingdom; Department of Computer Science, University College London, London, United Kingdom+Alan Turing Institute, London, United Kingdom; Department of Computer Science, University College London, London, United Kingdom+Alan Turing Institute, London, United Kingdom", "aff_domain": "cs.ucl.ac.uk; ; ", "email": "cs.ucl.ac.uk; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/yap21a.html", "aff_unique_index": "0+1;0+1;0+1", "aff_unique_norm": "University College London;Alan Turing Institute", "aff_unique_dep": "Department of Computer Science;", "aff_unique_url": "https://www.ucl.ac.uk;https://www.turing.ac.uk", "aff_unique_abbr": "UCL;ATI", "aff_campus_unique_index": "0+0;0+0;0+0", "aff_campus_unique": "London", "aff_country_unique_index": "0+0;0+0;0+0", "aff_country_unique": "United Kingdom" }, { "title": "Adversarial Combinatorial Bandits with General Non-linear Reward Functions", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8419", "id": "8419", "proceeding": "http://proceedings.mlr.press/v139/han21b.html", "slides": "", "author_site": "Yanjun Han, Yining Wang, Xi Chen", "author": "Yanjun Han; Yining Wang; Xi Chen", "abstract": "In this paper we study the adversarial combinatorial bandit with a known non-linear 
reward function, extending existing work on adversarial linear combinatorial bandit. {The adversarial combinatorial bandit with general non-linear reward is an important open problem in bandit literature, and it is still unclear whether there is a significant gap from the case of linear reward, stochastic bandit, or semi-bandit feedback.} We show that, with $N$ arms and subsets of $K$ arms being chosen at each of $T$ time periods, the minimax optimal regret is $\\widetilde\\Theta_{d}(\\sqrt{N^d T})$ if the reward function is a $d$-degree polynomial with $d< K$, and $\\Theta_K(\\sqrt{N^K T})$ if the reward function is not a low-degree polynomial. {Both bounds are significantly different from the bound $O(\\sqrt{\\mathrm{poly}(N,K)T})$ for the linear case, which suggests that there is a fundamental gap between the linear and non-linear reward structures.} Our result also finds applications to adversarial assortment optimization problem in online recommendation. We show that in the worst-case of adversarial assortment problem, the optimal algorithm must treat each individual $\\binom{N}{K}$ assortment as independent.", "bibtex": "@InProceedings{pmlr-v139-han21b,\n title = \t {Adversarial Combinatorial Bandits with General Non-linear Reward Functions},\n author = {Han, Yanjun and Wang, Yining and Chen, Xi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4030--4039},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/han21b/han21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/han21b.html},\n abstract = \t {In this paper we study the adversarial combinatorial bandit with a known non-linear reward function, extending existing work on adversarial linear combinatorial bandit. {The adversarial combinatorial bandit with general non-linear reward is an important open problem in bandit literature, and it is still unclear whether there is a significant gap from the case of linear reward, stochastic bandit, or semi-bandit feedback.} We show that, with $N$ arms and subsets of $K$ arms being chosen at each of $T$ time periods, the minimax optimal regret is $\\widetilde\\Theta_{d}(\\sqrt{N^d T})$ if the reward function is a $d$-degree polynomial with $d< K$, and $\\Theta_K(\\sqrt{N^K T})$ if the reward function is not a low-degree polynomial. {Both bounds are significantly different from the bound $O(\\sqrt{\\mathrm{poly}(N,K)T})$ for the linear case, which suggests that there is a fundamental gap between the linear and non-linear reward structures.} Our result also finds applications to adversarial assortment optimization problem in online recommendation. 
We show that in the worst-case of adversarial assortment problem, the optimal algorithm must treat each individual $\\binom{N}{K}$ assortment as independent.}\n}", "pdf": "http://proceedings.mlr.press/v139/han21b/han21b.pdf", "supp": "", "pdf_size": 291627, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1899338256561457177&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Stern School of Business, New York University, New York, NY 10012, USA; Department of Electrical Engineering, Stanford University, Stanford, CA 94305, USA; Warrington College of Business, University of Florida, Gainesville, FL 32611, USA", "aff_domain": "warrington.ufl.edu; ; ", "email": "warrington.ufl.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/han21b.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "New York University;Stanford University;University of Florida", "aff_unique_dep": "Stern School of Business;Department of Electrical Engineering;Warrington College of Business", "aff_unique_url": "https://www.nyu.edu;https://www.stanford.edu;https://warrington.ufl.edu", "aff_unique_abbr": "NYU;Stanford;UF", "aff_campus_unique_index": "0;1;2", "aff_campus_unique": "New York;Stanford;Gainesville", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Adversarial Dueling Bandits", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10433", "id": "10433", "proceeding": "http://proceedings.mlr.press/v139/saha21a.html", "slides": "/media/icml-2021/Slides/10433.pdf", "author_site": "Aadirupa Saha, Tomer Koren, Yishay Mansour", "author": "Aadirupa Saha; Tomer Koren; Yishay Mansour", "abstract": "We introduce the problem of regret minimization in Adversarial Dueling Bandits. As in classic Dueling Bandits, the learner has to repeatedly choose a pair of items and observe only a relative binary \u2018win-loss\u2019 feedback for this pair, but here this feedback is generated from an arbitrary preference matrix, possibly chosen adversarially. Our main result is an algorithm whose $T$-round regret compared to the \\emph{Borda-winner} from a set of $K$ items is $\\tilde{O}(K^{1/3}T^{2/3})$, as well as a matching $\\Omega(K^{1/3}T^{2/3})$ lower bound. We also prove a similar high probability regret bound. We further consider a simpler \\emph{fixed-gap} adversarial setup, which bridges between two extreme preference feedback models for dueling bandits: stationary preferences and an arbitrary sequence of preferences. For the fixed-gap adversarial setup we give an $\\smash{ \\tilde{O}((K/\\Delta^2)\\log{T}) }$ regret algorithm, where $\\Delta$ is the gap in Borda scores between the best item and all other items, and show a lower bound of $\\Omega(K/\\Delta^2)$ indicating that our dependence on the main problem parameters $K$ and $\\Delta$ is tight (up to logarithmic factors). 
Finally, we corroborate the theoretical results with empirical evaluations.", "bibtex": "@InProceedings{pmlr-v139-saha21a,\n title = \t {Adversarial Dueling Bandits},\n author = {Saha, Aadirupa and Koren, Tomer and Mansour, Yishay},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9235--9244},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/saha21a/saha21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/saha21a.html},\n abstract = \t {We introduce the problem of regret minimization in Adversarial Dueling Bandits. As in classic Dueling Bandits, the learner has to repeatedly choose a pair of items and observe only a relative binary \u2018win-loss\u2019 feedback for this pair, but here this feedback is generated from an arbitrary preference matrix, possibly chosen adversarially. Our main result is an algorithm whose $T$-round regret compared to the \\emph{Borda-winner} from a set of $K$ items is $\\tilde{O}(K^{1/3}T^{2/3})$, as well as a matching $\\Omega(K^{1/3}T^{2/3})$ lower bound. We also prove a similar high probability regret bound. We further consider a simpler \\emph{fixed-gap} adversarial setup, which bridges between two extreme preference feedback models for dueling bandits: stationary preferences and an arbitrary sequence of preferences. For the fixed-gap adversarial setup we give an $\\smash{ \\tilde{O}((K/\\Delta^2)\\log{T}) }$ regret algorithm, where $\\Delta$ is the gap in Borda scores between the best item and all other items, and show a lower bound of $\\Omega(K/\\Delta^2)$ indicating that our dependence on the main problem parameters $K$ and $\\Delta$ is tight (up to logarithmic factors). 
Finally, we corroborate the theoretical results with empirical evaluations.}\n}", "pdf": "http://proceedings.mlr.press/v139/saha21a/saha21a.pdf", "supp": "", "pdf_size": 838799, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8733995499876816252&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Microsoft Research, New York City; Blavatnik School of Computer Science, Tel Aviv University + Google Research Tel Aviv; Blavatnik School of Computer Science, Tel Aviv University + Google Research Tel Aviv", "aff_domain": "microsoft.com; ; ", "email": "microsoft.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/saha21a.html", "aff_unique_index": "0;1+2;1+2", "aff_unique_norm": "Microsoft;Tel Aviv University;Google", "aff_unique_dep": "Microsoft Research;Blavatnik School of Computer Science;Google Research", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://www.tau.ac.il;https://research.google", "aff_unique_abbr": "MSR;TAU;Google", "aff_campus_unique_index": "0;1+1;1+1", "aff_campus_unique": "New York City;Tel Aviv", "aff_country_unique_index": "0;1+1;1+1", "aff_country_unique": "United States;Israel" }, { "title": "Adversarial Multi Class Learning under Weak Supervision with Performance Guarantees", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9605", "id": "9605", "proceeding": "http://proceedings.mlr.press/v139/mazzetto21a.html", "slides": "", "author_site": "Alessio Mazzetto, Cyrus Cousins, Dylan Sam, Stephen Bach, Eli Upfal", "author": "Alessio Mazzetto; Cyrus Cousins; Dylan Sam; Stephen H Bach; Eli Upfal", "abstract": "We develop a rigorous approach for using a set of arbitrarily correlated weak supervision sources in order to solve a multiclass classification task when only a very small set of labeled data is available. Our learning algorithm provably converges to a model that has minimum empirical risk with respect to an adversarial choice over feasible labelings for a set of unlabeled data, where the feasibility of a labeling is computed through constraints defined by rigorously estimated statistics of the weak supervision sources. We show theoretical guarantees for this approach that depend on the information provided by the weak supervision sources. Notably, this method does not require the weak supervision sources to have the same labeling space as the multiclass classification task. We demonstrate the effectiveness of our approach with experiments on various image classification tasks.", "bibtex": "@InProceedings{pmlr-v139-mazzetto21a,\n title = \t {Adversarial Multi Class Learning under Weak Supervision with Performance Guarantees},\n author = {Mazzetto, Alessio and Cousins, Cyrus and Sam, Dylan and Bach, Stephen H and Upfal, Eli},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7534--7543},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/mazzetto21a/mazzetto21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/mazzetto21a.html},\n abstract = \t {We develop a rigorous approach for using a set of arbitrarily correlated weak supervision sources in order to solve a multiclass classification task when only a very small set of labeled data is available. 
Our learning algorithm provably converges to a model that has minimum empirical risk with respect to an adversarial choice over feasible labelings for a set of unlabeled data, where the feasibility of a labeling is computed through constraints defined by rigorously estimated statistics of the weak supervision sources. We show theoretical guarantees for this approach that depend on the information provided by the weak supervision sources. Notably, this method does not require the weak supervision sources to have the same labeling space as the multiclass classification task. We demonstrate the effectiveness of our approach with experiments on various image classification tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/mazzetto21a/mazzetto21a.pdf", "supp": "", "pdf_size": 611340, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6313317134707645870&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science, Brown University; Department of Computer Science, Brown University; Department of Computer Science, Brown University; Department of Computer Science, Brown University; Department of Computer Science, Brown University", "aff_domain": "brown.edu; ; ; ; ", "email": "brown.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/mazzetto21a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Brown University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.brown.edu", "aff_unique_abbr": "Brown", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Adversarial Option-Aware Hierarchical Imitation Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8589", "id": "8589", "proceeding": "http://proceedings.mlr.press/v139/jing21a.html", "slides": "", "author_site": "Mingxuan Jing, Wenbing Huang, Fuchun Sun, Xiaojian Ma, Tao Kong, Chuang Gan, Lei Li", "author": "Mingxuan Jing; Wenbing Huang; Fuchun Sun; Xiaojian Ma; Tao Kong; Chuang Gan; Lei Li", "abstract": "It has been a challenge to learning skills for an agent from long-horizon unannotated demonstrations. Existing approaches like Hierarchical Imitation Learning(HIL) are prone to compounding errors or suboptimal solutions. In this paper, we propose Option-GAIL, a novel method to learn skills at long horizon. The key idea of Option-GAIL is modeling the task hierarchy by options and train the policy via generative adversarial optimization. In particular, we propose an Expectation-Maximization(EM)-style algorithm: an E-step that samples the options of expert conditioned on the current learned policy, and an M-step that updates the low- and high-level policies of agent simultaneously to minimize the newly proposed option-occupancy measurement between the expert and the agent. We theoretically prove the convergence of the proposed algorithm. 
Experiments show that Option-GAIL outperforms other counterparts consistently across a variety of tasks.", "bibtex": "@InProceedings{pmlr-v139-jing21a,\n title = \t {Adversarial Option-Aware Hierarchical Imitation Learning},\n author = {Jing, Mingxuan and Huang, Wenbing and Sun, Fuchun and Ma, Xiaojian and Kong, Tao and Gan, Chuang and Li, Lei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5097--5106},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jing21a/jing21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jing21a.html},\n abstract = \t {It has been a challenge to learning skills for an agent from long-horizon unannotated demonstrations. Existing approaches like Hierarchical Imitation Learning(HIL) are prone to compounding errors or suboptimal solutions. In this paper, we propose Option-GAIL, a novel method to learn skills at long horizon. The key idea of Option-GAIL is modeling the task hierarchy by options and train the policy via generative adversarial optimization. In particular, we propose an Expectation-Maximization(EM)-style algorithm: an E-step that samples the options of expert conditioned on the current learned policy, and an M-step that updates the low- and high-level policies of agent simultaneously to minimize the newly proposed option-occupancy measurement between the expert and the agent. We theoretically prove the convergence of the proposed algorithm. Experiments show that Option-GAIL outperforms other counterparts consistently across a variety of tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/jing21a/jing21a.pdf", "supp": "", "pdf_size": 2419583, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15905939393304829332&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science and Technology, Tsinghua University, Beijing, China; Department of Computer Science and Technology, Tsinghua University, Beijing, China; Department of Computer Science and Technology, Tsinghua University, Beijing, China + THU-Bosch JCML center; University of California, Los Angeles, USA; Bytedance AI Lab, Beijing, China; MIT-IBM Watson AI Lab, USA; Bytedance AI Lab, Beijing, China", "aff_domain": "outlook.com;126.com;tsinghua.edu.cn; ; ; ; ", "email": "outlook.com;126.com;tsinghua.edu.cn; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/jing21a.html", "aff_unique_index": "0;0;0+0;1;2;3;2", "aff_unique_norm": "Tsinghua University;University of California, Los Angeles;ByteDance;IBM", "aff_unique_dep": "Department of Computer Science and Technology;;AI Lab;AI Lab", "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.ucla.edu;https://www.bytedance.com;", "aff_unique_abbr": "THU;UCLA;Bytedance AI Lab;MIT-IBM AI Lab", "aff_campus_unique_index": "0;0;0;2;0;0", "aff_campus_unique": "Beijing;;Los Angeles", "aff_country_unique_index": "0;0;0+0;1;0;1;0", "aff_country_unique": "China;United States" }, { "title": "Adversarial Policy Learning in Two-player Competitive Games", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9779", "id": "9779", "proceeding": "http://proceedings.mlr.press/v139/guo21b.html", "slides": "", "author_site": "Wenbo Guo, Xian Wu, Sui Huang, Xinyu Xing", "author": "Wenbo 
Guo; Xian Wu; Sui Huang; Xinyu Xing", "abstract": "In a two-player deep reinforcement learning task, recent work shows an attacker could learn an adversarial policy that triggers a target agent to perform poorly and even react in an undesired way. However, its efficacy heavily relies upon the zero-sum assumption made in the two-player game. In this work, we propose a new adversarial learning algorithm. It addresses the problem by resetting the optimization goal in the learning process and designing a new surrogate optimization function. Our experiments show that our method significantly improves adversarial agents\u2019 exploitability compared with the state-of-art attack. Besides, we also discover that our method could augment an agent with the ability to abuse the target game\u2019s unfairness. Finally, we show that agents adversarially re-trained against our adversarial agents could obtain stronger adversary-resistance.", "bibtex": "@InProceedings{pmlr-v139-guo21b,\n title = \t {Adversarial Policy Learning in Two-player Competitive Games},\n author = {Guo, Wenbo and Wu, Xian and Huang, Sui and Xing, Xinyu},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3910--3919},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/guo21b/guo21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/guo21b.html},\n abstract = \t {In a two-player deep reinforcement learning task, recent work shows an attacker could learn an adversarial policy that triggers a target agent to perform poorly and even react in an undesired way. However, its efficacy heavily relies upon the zero-sum assumption made in the two-player game. In this work, we propose a new adversarial learning algorithm. It addresses the problem by resetting the optimization goal in the learning process and designing a new surrogate optimization function. Our experiments show that our method significantly improves adversarial agents\u2019 exploitability compared with the state-of-art attack. Besides, we also discover that our method could augment an agent with the ability to abuse the target game\u2019s unfairness. 
Finally, we show that agents adversarially re-trained against our adversarial agents could obtain stronger adversary-resistance.}\n}", "pdf": "http://proceedings.mlr.press/v139/guo21b/guo21b.pdf", "supp": "", "pdf_size": 1759176, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16789305191996443914&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "College of Information Sciences and Technology, The Pennsylvania State University, State College, PA, USA; College of Information Sciences and Technology, The Pennsylvania State University, State College, PA, USA; Netflix Inc., Los Gatos, CA, USA; College of Information Sciences and Technology, The Pennsylvania State University, State College, PA, USA", "aff_domain": "ist.psu.edu; ; ; ", "email": "ist.psu.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/guo21b.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "Pennsylvania State University;Netflix Inc.", "aff_unique_dep": "College of Information Sciences and Technology;", "aff_unique_url": "https://www.psu.edu;https://www.netflix.com", "aff_unique_abbr": "PSU;Netflix", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "State College;", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Adversarial Purification with Score-based Generative Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10745", "id": "10745", "proceeding": "http://proceedings.mlr.press/v139/yoon21a.html", "slides": "/media/icml-2021/Slides/10745.pdf", "author_site": "Jongmin Yoon, Sung Ju Hwang, Juho Lee", "author": "Jongmin Yoon; Sung Ju Hwang; Juho Lee", "abstract": "While adversarial training is considered as a standard defense method against adversarial attacks for image classifiers, adversarial purification, which purifies attacked images into clean images with a standalone purification model, has shown promise as an alternative defense method. Recently, an EBM trained with MCMC has been highlighted as a purification model, where an attacked image is purified by running a long Markov-chain using the gradients of the EBM. Yet, the practicality of the adversarial purification using an EBM remains questionable because the number of MCMC steps required for such purification is too large. In this paper, we propose a novel adversarial purification method based on an EBM trained with DSM. We show that an EBM trained with DSM can quickly purify attacked images within a few steps. We further introduce a simple yet effective randomized purification scheme that injects random noises into images before purification. This process screens the adversarial perturbations imposed on images by the random noises and brings the images to the regime where the EBM can denoise well.
We show that our purification method is robust against various attacks and demonstrate its state-of-the-art performances.", "bibtex": "@InProceedings{pmlr-v139-yoon21a,\n title = \t {Adversarial Purification with Score-based Generative Models},\n author = {Yoon, Jongmin and Hwang, Sung Ju and Lee, Juho},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12062--12072},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yoon21a/yoon21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yoon21a.html},\n abstract = \t {While adversarial training is considered as a standard defense method against adversarial attacks for image classifiers, adversarial purification, which purifies attacked images into clean images with a standalone purification, model has shown promises as an alternative defense method. Recently, an EBM trained with MCMC has been highlighted as a purification model, where an attacked image is purified by running a long Markov-chain using the gradients of the EBM. Yet, the practicality of the adversarial purification using an EBM remains questionable because the number of MCMC steps required for such purification is too large. In this paper, we propose a novel adversarial purification method based on an EBM trained with DSM. We show that an EBM trained with DSM can quickly purify attacked images within a few steps. We further introduce a simple yet effective randomized purification scheme that injects random noises into images before purification. This process screens the adversarial perturbations imposed on images by the random noises and brings the images to the regime where the EBM can denoise well. 
We show that our purification method is robust against various attacks and demonstrate its state-of-the-art performances.}\n}", "pdf": "http://proceedings.mlr.press/v139/yoon21a/yoon21a.pdf", "supp": "", "pdf_size": 461967, "gs_citation": 202, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1510322463041774819&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Korea Advanced Institute of Science and Technology, Daejeon, Korea+AITRICS, Seoul, Korea; Korea Advanced Institute of Science and Technology, Daejeon, Korea+AITRICS, Seoul, Korea; Korea Advanced Institute of Science and Technology, Daejeon, Korea+AITRICS, Seoul, Korea", "aff_domain": "kaist.ac.kr; ;kaist.ac.kr", "email": "kaist.ac.kr; ;kaist.ac.kr", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/yoon21a.html", "aff_unique_index": "0+1;0+1;0+1", "aff_unique_norm": "Korea Advanced Institute of Science and Technology;AITRICS", "aff_unique_dep": ";", "aff_unique_url": "https://www.kaist.ac.kr;", "aff_unique_abbr": "KAIST;", "aff_campus_unique_index": "0+1;0+1;0+1", "aff_campus_unique": "Daejeon;Seoul", "aff_country_unique_index": "0+0;0+0;0+0", "aff_country_unique": "South Korea" }, { "title": "Adversarial Robustness Guarantees for Random Deep Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8649", "id": "8649", "proceeding": "http://proceedings.mlr.press/v139/de-palma21a.html", "slides": "/media/icml-2021/Slides/8649.pdf", "author_site": "Giacomo De Palma, Bobak T Kiani, Seth Lloyd", "author": "Giacomo De Palma; Bobak Kiani; Seth Lloyd", "abstract": "The reliability of deep learning algorithms is fundamentally challenged by the existence of adversarial examples, which are incorrectly classified inputs that are extremely close to a correctly classified input. We explore the properties of adversarial examples for deep neural networks with random weights and biases, and prove that for any p$\\geq$1, the \\ell^p distance of any given input from the classification boundary scales as one over the square root of the dimension of the input times the \\ell^p norm of the input. The results are based on the recently proved equivalence between Gaussian processes and deep neural networks in the limit of infinite width of the hidden layers, and are validated with experiments on both random deep neural networks and deep neural networks trained on the MNIST and CIFAR10 datasets. 
The results constitute a fundamental advance in the theoretical understanding of adversarial examples, and open the way to a thorough theoretical characterization of the relation between network architecture and robustness to adversarial perturbations.", "bibtex": "@InProceedings{pmlr-v139-de-palma21a,\n title = \t {Adversarial Robustness Guarantees for Random Deep Neural Networks},\n author = {De Palma, Giacomo and Kiani, Bobak and Lloyd, Seth},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2522--2534},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/de-palma21a/de-palma21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/de-palma21a.html},\n abstract = \t {The reliability of deep learning algorithms is fundamentally challenged by the existence of adversarial examples, which are incorrectly classified inputs that are extremely close to a correctly classified input. We explore the properties of adversarial examples for deep neural networks with random weights and biases, and prove that for any p$\\geq$1, the \\ell^p distance of any given input from the classification boundary scales as one over the square root of the dimension of the input times the \\ell^p norm of the input. The results are based on the recently proved equivalence between Gaussian processes and deep neural networks in the limit of infinite width of the hidden layers, and are validated with experiments on both random deep neural networks and deep neural networks trained on the MNIST and CIFAR10 datasets. The results constitute a fundamental advance in the theoretical understanding of adversarial examples, and open the way to a thorough theoretical characterization of the relation between network architecture and robustness to adversarial perturbations.}\n}", "pdf": "http://proceedings.mlr.press/v139/de-palma21a/de-palma21a.pdf", "supp": "", "pdf_size": 975568, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2504173380091047222&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Scuola Normale Superiore, Pisa, Italy + Department of Mechanical Engineering, MIT, Cambridge MA, USA + Research Laboratory of Electronics, MIT, Cambridge MA, USA; Research Laboratory of Electronics, MIT, Cambridge MA, USA + Department of Electrical Engineering & Computer Science, MIT, Cambridge MA, USA; Department of Mechanical Engineering, MIT, Cambridge MA, USA + Research Laboratory of Electronics, MIT, Cambridge MA, USA", "aff_domain": "sns.it;mit.edu;mit.edu", "email": "sns.it;mit.edu;mit.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/de-palma21a.html", "aff_unique_index": "0+1+1;1+1;1+1", "aff_unique_norm": "Scuola Normale Superiore;Massachusetts Institute of Technology", "aff_unique_dep": ";Department of Mechanical Engineering", "aff_unique_url": "https://www.sns.it;https://web.mit.edu", "aff_unique_abbr": "SNS;MIT", "aff_campus_unique_index": "0+1+1;1+1;1+1", "aff_campus_unique": "Pisa;Cambridge", "aff_country_unique_index": "0+1+1;1+1;1+1", "aff_country_unique": "Italy;United States" }, { "title": "Affine Invariant Analysis of Frank-Wolfe on Strongly Convex Sets", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9815", "id": "9815", "proceeding": 
"http://proceedings.mlr.press/v139/kerdreux21a.html", "slides": "", "author_site": "Thomas Kerdreux, Lewis Liu, Simon Lacoste-Julien, Damien Scieur", "author": "Thomas Kerdreux; Lewis Liu; Simon Lacoste-Julien; Damien Scieur", "abstract": "It is known that the Frank-Wolfe (FW) algorithm, which is affine covariant, enjoys faster convergence rates than $\\mathcal{O}\\left(1/K\\right)$ when the constraint set is strongly convex. However, these results rely on norm-dependent assumptions, usually incurring non-affine invariant bounds, in contradiction with FW\u2019s affine covariant property. In this work, we introduce new structural assumptions on the problem (such as the directional smoothness) and derive an affine invariant, norm-independent analysis of Frank-Wolfe. We show that our rates are better than any other known convergence rates of FW in this setting. Based on our analysis, we propose an affine invariant backtracking line-search. Interestingly, we show that typical backtracking line-searches using smoothness of the objective function present similar performances than its affine invariant counterpart, despite using affine dependent norms in the step size\u2019s computation.", "bibtex": "@InProceedings{pmlr-v139-kerdreux21a,\n title = \t {Affine Invariant Analysis of Frank-Wolfe on Strongly Convex Sets},\n author = {Kerdreux, Thomas and Liu, Lewis and Lacoste-Julien, Simon and Scieur, Damien},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5398--5408},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kerdreux21a/kerdreux21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kerdreux21a.html},\n abstract = \t {It is known that the Frank-Wolfe (FW) algorithm, which is affine covariant, enjoys faster convergence rates than $\\mathcal{O}\\left(1/K\\right)$ when the constraint set is strongly convex. However, these results rely on norm-dependent assumptions, usually incurring non-affine invariant bounds, in contradiction with FW\u2019s affine covariant property. In this work, we introduce new structural assumptions on the problem (such as the directional smoothness) and derive an affine invariant, norm-independent analysis of Frank-Wolfe. We show that our rates are better than any other known convergence rates of FW in this setting. Based on our analysis, we propose an affine invariant backtracking line-search. 
Interestingly, we show that typical backtracking line-searches using smoothness of the objective function present similar performances than its affine invariant counterpart, despite using affine dependent norms in the step size\u2019s computation.}\n}", "pdf": "http://proceedings.mlr.press/v139/kerdreux21a/kerdreux21a.pdf", "supp": "", "pdf_size": 559077, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13950099175709022670&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Zuse Institute, Berlin; D\u00e9partement d\u2019informatique et de recherche op\u00e9rationnelle (DIRO), Universit\u00e9 de Montr\u00e9al + Mila, Montr\u00e9al + Samsung SAIT AI Lab, Montr\u00e9al + Canada CIFAR AI Chair; D\u00e9partement d\u2019informatique et de recherche op\u00e9rationnelle (DIRO), Universit\u00e9 de Montr\u00e9al + Mila, Montr\u00e9al + Samsung SAIT AI Lab, Montr\u00e9al + Canada CIFAR AI Chair; Samsung SAIT AI Lab, Montr\u00e9al + Mila, Montr\u00e9al + D\u00e9partement d\u2019informatique et de recherche op\u00e9rationnelle (DIRO), Universit\u00e9 de Montr\u00e9al", "aff_domain": "gmail.com; ; ;gmail.com", "email": "gmail.com; ; ;gmail.com", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/kerdreux21a.html", "aff_unique_index": "0;1+2+3+4;1+2+3+4;3+2+1", "aff_unique_norm": "Zuse Institute Berlin;Universit\u00e9 de Montr\u00e9al;Mila;Samsung;Canadian Institute for Advanced Research", "aff_unique_dep": ";D\u00e9partement d\u2019informatique et de recherche op\u00e9rationnelle (DIRO);;AI Lab;AI Chair", "aff_unique_url": "https://www.zib.de;https://www.umontreal.ca;https://mila.quebec;https://www.sait.samsung.com;https://www.cifar.ca", "aff_unique_abbr": "ZIB;UdeM;Mila;SAIT;CIFAR", "aff_campus_unique_index": "0;1+1+1;1+1+1;1+1+1", "aff_campus_unique": "Berlin;Montr\u00e9al;", "aff_country_unique_index": "0;1+1+1+1;1+1+1+1;1+1+1", "aff_country_unique": "Germany;Canada" }, { "title": "Aggregating From Multiple Target-Shifted Sources", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10163", "id": "10163", "proceeding": "http://proceedings.mlr.press/v139/shui21a.html", "slides": "/media/icml-2021/Slides/10163.pdf", "author_site": "Changjian Shui, Zijian Li, Jiaqi Li, Christian Gagne, Charles X. Ling, Boyu Wang", "author": "Changjian Shui; Zijian Li; Jiaqi Li; Christian Gagn\u00e9; Charles X Ling; Boyu Wang", "abstract": "Multi-source domain adaptation aims at leveraging the knowledge from multiple tasks for predicting a related target domain. Hence, a crucial aspect is to properly combine different sources based on their relations. In this paper, we analyzed the problem for aggregating source domains with different label distributions, where most recent source selection approaches fail. Our proposed algorithm differs from previous approaches in two key ways: the model aggregates multiple sources mainly through the similarity of semantic conditional distribution rather than marginal distribution; the model proposes a unified framework to select relevant sources for three popular scenarios, i.e., domain adaptation with limited label on target domain, unsupervised domain adaptation and label partial unsupervised domain adaption. We evaluate the proposed method through extensive experiments.
The empirical results significantly outperform the baselines.", "bibtex": "@InProceedings{pmlr-v139-shui21a,\n title = \t {Aggregating From Multiple Target-Shifted Sources},\n author = {Shui, Changjian and Li, Zijian and Li, Jiaqi and Gagn{\\'e}, Christian and Ling, Charles X and Wang, Boyu},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9638--9648},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/shui21a/shui21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/shui21a.html},\n abstract = \t {Multi-source domain adaptation aims at leveraging the knowledge from multiple tasks for predicting a related target domain. Hence, a crucial aspect is to properly combine different sources based on their relations. In this paper, we analyzed the problem for aggregating source domains with different label distributions, where most recent source selection approaches fail. Our proposed algorithm differs from previous approaches in two key ways: the model aggregates multiple sources mainly through the similarity of semantic conditional distribution rather than marginal distribution; the model proposes a unified framework to select relevant sources for three popular scenarios, i.e., domain adaptation with limited label on target domain, unsupervised domain adaptation and label partial unsupervised domain adaption. We evaluate the proposed method through extensive experiments. The empirical results significantly outperform the baselines.}\n}", "pdf": "http://proceedings.mlr.press/v139/shui21a/shui21a.pdf", "supp": "", "pdf_size": 2419856, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16974883036021419315&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": ";;;;;", "aff_domain": ";;;;;", "email": ";;;;;", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/shui21a.html" }, { "title": "Agnostic Learning of Halfspaces with Gradient Descent via Soft Margins", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10395", "id": "10395", "proceeding": "http://proceedings.mlr.press/v139/frei21a.html", "slides": "/media/icml-2021/Slides/10395.pdf", "author_site": "Spencer Frei, Yuan Cao, Quanquan Gu", "author": "Spencer Frei; Yuan Cao; Quanquan Gu", "abstract": "We analyze the properties of gradient descent on convex surrogates for the zero-one loss for the agnostic learning of halfspaces. We show that when a quantity we refer to as the \\textit{soft margin} is well-behaved\u2014a condition satisfied by log-concave isotropic distributions among others\u2014minimizers of convex surrogates for the zero-one loss are approximate minimizers for the zero-one loss itself. 
As standard convex optimization arguments lead to efficient guarantees for minimizing convex surrogates of the zero-one loss, our methods allow for the first positive guarantees for the classification error of halfspaces learned by gradient descent using the binary cross-entropy or hinge loss in the presence of agnostic label noise.", "bibtex": "@InProceedings{pmlr-v139-frei21a,\n title = \t {Agnostic Learning of Halfspaces with Gradient Descent via Soft Margins},\n author = {Frei, Spencer and Cao, Yuan and Gu, Quanquan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3417--3426},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/frei21a/frei21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/frei21a.html},\n abstract = \t {We analyze the properties of gradient descent on convex surrogates for the zero-one loss for the agnostic learning of halfspaces. We show that when a quantity we refer to as the \\textit{soft margin} is well-behaved\u2014a condition satisfied by log-concave isotropic distributions among others\u2014minimizers of convex surrogates for the zero-one loss are approximate minimizers for the zero-one loss itself. As standard convex optimization arguments lead to efficient guarantees for minimizing convex surrogates of the zero-one loss, our methods allow for the first positive guarantees for the classification error of halfspaces learned by gradient descent using the binary cross-entropy or hinge loss in the presence of agnostic label noise.}\n}", "pdf": "http://proceedings.mlr.press/v139/frei21a/frei21a.pdf", "supp": "", "pdf_size": 380083, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12441960967387269031&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Statistics, UCLA; Department of Computer Science, UCLA; Department of Computer Science, UCLA", "aff_domain": "stats.ucla.edu;cs.ucla.edu;cs.ucla.edu", "email": "stats.ucla.edu;cs.ucla.edu;cs.ucla.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/frei21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, Los Angeles", "aff_unique_dep": "Department of Statistics", "aff_unique_url": "https://www.ucla.edu", "aff_unique_abbr": "UCLA", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Los Angeles", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Align, then memorise: the dynamics of learning with feedback alignment", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8673", "id": "8673", "proceeding": "http://proceedings.mlr.press/v139/refinetti21a.html", "slides": "", "author_site": "Maria Refinetti, St\u00e9phane d'Ascoli, Ruben Ohana, Sebastian Goldt", "author": "Maria Refinetti; St\u00e9phane D\u2019Ascoli; Ruben Ohana; Sebastian Goldt", "abstract": "Direct Feedback Alignment (DFA) is emerging as an efficient and biologically plausible alternative to backpropagation for training deep neural networks. Despite relying on random feedback weights for the backward pass, DFA successfully trains state-of-the-art models such as Transformers. On the other hand, it notoriously fails to train convolutional networks. 
An understanding of the inner workings of DFA to explain these diverging results remains elusive. Here, we propose a theory of feedback alignment algorithms. We first show that learning in shallow networks proceeds in two steps: an alignment phase, where the model adapts its weights to align the approximate gradient with the true gradient of the loss function, is followed by a memorisation phase, where the model focuses on fitting the data. This two-step process has a degeneracy breaking effect: out of all the low-loss solutions in the landscape, a network trained with DFA naturally converges to the solution which maximises gradient alignment. We also identify a key quantity underlying alignment in deep linear networks: the conditioning of the alignment matrices. The latter enables a detailed understanding of the impact of data structure on alignment, and suggests a simple explanation for the well-known failure of DFA to train convolutional neural networks. Numerical experiments on MNIST and CIFAR10 clearly demonstrate degeneracy breaking in deep non-linear networks and show that the align-then-memorize process occurs sequentially from the bottom layers of the network to the top.", "bibtex": "@InProceedings{pmlr-v139-refinetti21a,\n title = \t {Align, then memorise: the dynamics of learning with feedback alignment},\n author = {Refinetti, Maria and D'Ascoli, St{\\'e}phane and Ohana, Ruben and Goldt, Sebastian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8925--8935},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/refinetti21a/refinetti21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/refinetti21a.html},\n abstract = \t {Direct Feedback Alignment (DFA) is emerging as an efficient and biologically plausible alternative to backpropagation for training deep neural networks. Despite relying on random feedback weights for the backward pass, DFA successfully trains state-of-the-art models such as Transformers. On the other hand, it notoriously fails to train convolutional networks. An understanding of the inner workings of DFA to explain these diverging results remains elusive. Here, we propose a theory of feedback alignment algorithms. We first show that learning in shallow networks proceeds in two steps: an alignment phase, where the model adapts its weights to align the approximate gradient with the true gradient of the loss function, is followed by a memorisation phase, where the model focuses on fitting the data. This two-step process has a degeneracy breaking effect: out of all the low-loss solutions in the landscape, a network trained with DFA naturally converges to the solution which maximises gradient alignment. We also identify a key quantity underlying alignment in deep linear networks: the conditioning of the alignment matrices. The latter enables a detailed understanding of the impact of data structure on alignment, and suggests a simple explanation for the well-known failure of DFA to train convolutional neural networks.
Numerical experiments on MNIST and CIFAR10 clearly demonstrate degeneracy breaking in deep non-linear networks and show that the align-then-memorize process occurs sequentially from the bottom layers of the network to the top.}\n}", "pdf": "http://proceedings.mlr.press/v139/refinetti21a/refinetti21a.pdf", "supp": "", "pdf_size": 2432360, "gs_citation": 61, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10115011183031848291&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Department of Physics, Ecole Normale Sup\u00e9rieure, Paris, France+IdePHICS laboratory, EPFL; Department of Physics, Ecole Normale Sup\u00e9rieure, Paris, France+Facebook AI Research, Paris, France; Department of Physics, Ecole Normale Sup\u00e9rieure, Paris, France+LightOn, Paris, France; International School of Advanced Studies (SISSA), Trieste, Italy", "aff_domain": "; ; ;sissa.it", "email": "; ; ;sissa.it", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/refinetti21a.html", "aff_unique_index": "0+1;0+2;0+3;4", "aff_unique_norm": "Ecole Normale Sup\u00e9rieure;EPFL;Meta;LightOn;International School of Advanced Studies", "aff_unique_dep": "Department of Physics;IdePHICS laboratory;Facebook AI Research;;", "aff_unique_url": "https://www.ens.fr;https://www.epfl.ch;https://research.facebook.com;;https://www.sissa.it", "aff_unique_abbr": "ENS;EPFL;FAIR;;SISSA", "aff_campus_unique_index": "0;0+0;0;2", "aff_campus_unique": "Paris;;Trieste", "aff_country_unique_index": "0+1;0+0;0+0;2", "aff_country_unique": "France;Switzerland;Italy" }, { "title": "Almost Optimal Anytime Algorithm for Batched Multi-Armed Bandits", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8491", "id": "8491", "proceeding": "http://proceedings.mlr.press/v139/jin21c.html", "slides": "", "author_site": "Tianyuan Jin, Jing Tang, Pan Xu, Keke Huang, Xiaokui Xiao, Quanquan Gu", "author": "Tianyuan Jin; Jing Tang; Pan Xu; Keke Huang; Xiaokui Xiao; Quanquan Gu", "abstract": "In batched multi-armed bandit problems, the learner can adaptively pull arms and adjust strategy in batches. In many real applications, not only the regret but also the batch complexity need to be optimized. Existing batched bandit algorithms usually assume that the time horizon T is known in advance. However, many applications involve an unpredictable stopping time. In this paper, we study the anytime batched multi-armed bandit problem. We propose an anytime algorithm that achieves the asymptotically optimal regret for exponential families of reward distributions with $O(\\log \\log T \\ilog^{\\alpha} (T))$ \\footnote{Notation \\ilog^{\\alpha} (T) is the result of iteratively applying the logarithm function on T for \\alpha times, e.g., \\ilog^{3} (T)=\\log\\log\\log T.} batches, where $\\alpha\\in O_{T}(1)$.
Moreover, we prove that for any constant c>0, no algorithm can achieve the asymptotically optimal regret within c\\log\\log T batches.", "bibtex": "@InProceedings{pmlr-v139-jin21c,\n title = \t {Almost Optimal Anytime Algorithm for Batched Multi-Armed Bandits},\n author = {Jin, Tianyuan and Tang, Jing and Xu, Pan and Huang, Keke and Xiao, Xiaokui and Gu, Quanquan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5065--5073},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jin21c/jin21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/jin21c.html},\n abstract = \t {In batched multi-armed bandit problems, the learner can adaptively pull arms and adjust strategy in batches. In many real applications, not only the regret but also the batch complexity need to be optimized. Existing batched bandit algorithms usually assume that the time horizon T is known in advance. However, many applications involve an unpredictable stopping time. In this paper, we study the anytime batched multi-armed bandit problem. We propose an anytime algorithm that achieves the asymptotically optimal regret for exponential families of reward distributions with $O(\\log \\log T \\ilog^{\\alpha} (T))$ \\footnote{Notation \\ilog^{\\alpha} (T) is the result of iteratively applying the logarithm function on T for \\alpha times, e.g., \\ilog^{3} (T)=\\log\\log\\log T.} batches, where $\\alpha\\in O_{T}(1)$. Moreover, we prove that for any constant c>0, no algorithm can achieve the asymptotically optimal regret within c\\log\\log T batches.}\n}", "pdf": "http://proceedings.mlr.press/v139/jin21c/jin21c.pdf", "supp": "", "pdf_size": 467997, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10257575121173377072&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "School of Computing, National University of Singapore, Singapore; Data Science and Analytics Thrust, The Hong Kong University of Science and Technology; Department of Computer Science, University of California, Los Angeles, CA 90095, USA; School of Computing, National University of Singapore, Singapore; School of Computing, National University of Singapore, Singapore; Department of Computer Science, University of California, Los Angeles, CA 90095, USA", "aff_domain": "nus.edu.sg;cs.ucla.edu; ; ; ; ", "email": "nus.edu.sg;cs.ucla.edu; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/jin21c.html", "aff_unique_index": "0;1;2;0;0;2", "aff_unique_norm": "National University of Singapore;Hong Kong University of Science and Technology;University of California, Los Angeles", "aff_unique_dep": "School of Computing;Data Science and Analytics Thrust;Department of Computer Science", "aff_unique_url": "https://www.nus.edu.sg;https://www.ust.hk;https://www.ucla.edu", "aff_unique_abbr": "NUS;HKUST;UCLA", "aff_campus_unique_index": "1;2;2", "aff_campus_unique": ";Hong Kong SAR;Los Angeles", "aff_country_unique_index": "0;1;2;0;0;2", "aff_country_unique": "Singapore;China;United States" }, { "title": "AlphaNet: Improved Training of Supernets with Alpha-Divergence", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9165", "id": "9165", "proceeding": "http://proceedings.mlr.press/v139/wang21i.html", "slides": 
"/media/icml-2021/Slides/9165.pdf", "author_site": "Dilin Wang, Chengyue Gong, Meng Li, Qiang Liu, Vikas Chandra", "author": "Dilin Wang; Chengyue Gong; Meng Li; Qiang Liu; Vikas Chandra", "abstract": "Weight-sharing neural architecture search (NAS) is an effective technique for automating efficient neural architecture design. Weight-sharing NAS builds a supernet that assembles all the architectures as its sub-networks and jointly trains the supernet with the sub-networks. The success of weight-sharing NAS heavily relies on distilling the knowledge of the supernet to the sub-networks. However, we find that the widely used distillation divergence, i.e., KL divergence, may lead to student sub-networks that over-estimate or under-estimate the uncertainty of the teacher supernet, leading to inferior performance of the sub-networks. In this work, we propose to improve the supernet training with a more generalized alpha-divergence. By adaptively selecting the alpha-divergence, we simultaneously prevent the over-estimation or under-estimation of the uncertainty of the teacher model. We apply the proposed alpha-divergence based supernets training to both slimmable neural networks and weight-sharing NAS, and demonstrate significant improvements. Specifically, our discovered model family, AlphaNet, outperforms prior-art models on a wide range of FLOPs regimes, including BigNAS, Once-for-All networks, and AttentiveNAS. We achieve ImageNet top-1 accuracy of 80.0% with only 444M FLOPs. Our code and pretrained models are available at https://github.com/facebookresearch/AlphaNet.", "bibtex": "@InProceedings{pmlr-v139-wang21i,\n title = \t {AlphaNet: Improved Training of Supernets with Alpha-Divergence},\n author = {Wang, Dilin and Gong, Chengyue and Li, Meng and Liu, Qiang and Chandra, Vikas},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10760--10771},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21i/wang21i.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21i.html},\n abstract = \t {Weight-sharing neural architecture search (NAS) is an effective technique for automating efficient neural architecture design. Weight-sharing NAS builds a supernet that assembles all the architectures as its sub-networks and jointly trains the supernet with the sub-networks. The success of weight-sharing NAS heavily relies on distilling the knowledge of the supernet to the sub-networks. However, we find that the widely used distillation divergence, i.e., KL divergence, may lead to student sub-networks that over-estimate or under-estimate the uncertainty of the teacher supernet, leading to inferior performance of the sub-networks. In this work, we propose to improve the supernet training with a more generalized alpha-divergence. By adaptively selecting the alpha-divergence, we simultaneously prevent the over-estimation or under-estimation of the uncertainty of the teacher model. We apply the proposed alpha-divergence based supernets training to both slimmable neural networks and weight-sharing NAS, and demonstrate significant improvements. Specifically, our discovered model family, AlphaNet, outperforms prior-art models on a wide range of FLOPs regimes, including BigNAS, Once-for-All networks, and AttentiveNAS. 
We achieve ImageNet top-1 accuracy of 80.0% with only 444M FLOPs. Our code and pretrained models are available at https://github.com/facebookresearch/AlphaNet.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21i/wang21i.pdf", "supp": "", "pdf_size": 5842975, "gs_citation": 83, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16040812221590233106&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Facebook; Department of Computer Science, The University of Texas at Austin + Facebook; Facebook; Department of Computer Science, The University of Texas at Austin; Facebook", "aff_domain": "fb.com;cs.utexas.edu;fb.com;cs.utexas.edu;fb.com", "email": "fb.com;cs.utexas.edu;fb.com;cs.utexas.edu;fb.com", "github": "https://github.com/facebookresearch/AlphaNet", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/wang21i.html", "aff_unique_index": "0;1+0;0;1;0", "aff_unique_norm": "Meta;University of Texas at Austin", "aff_unique_dep": "Facebook, Inc.;Department of Computer Science", "aff_unique_url": "https://www.facebook.com;https://www.utexas.edu", "aff_unique_abbr": "FB;UT Austin", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Austin", "aff_country_unique_index": "0;0+0;0;0;0", "aff_country_unique": "United States" }, { "title": "Alternative Microfoundations for Strategic Classification", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9047", "id": "9047", "proceeding": "http://proceedings.mlr.press/v139/jagadeesan21a.html", "slides": "", "author_site": "Meena Jagadeesan, Celestine Mendler-D\u00fcnner, Moritz Hardt", "author": "Meena Jagadeesan; Celestine Mendler-D\u00fcnner; Moritz Hardt", "abstract": "When reasoning about strategic behavior in a machine learning context it is tempting to combine standard microfoundations of rational agents with the statistical decision theory underlying classification. In this work, we argue that a direct combination of these ingredients leads to brittle solution concepts of limited descriptive and prescriptive value. First, we show that rational agents with perfect information produce discontinuities in the aggregate response to a decision rule that we often do not observe empirically. Second, when any positive fraction of agents is not perfectly strategic, desirable stable points\u2014where the classifier is optimal for the data it entails\u2014no longer exist. Third, optimal decision rules under standard microfoundations maximize a measure of negative externality known as social burden within a broad class of assumptions about agent behavior. Recognizing these limitations we explore alternatives to standard microfoundations for binary classification. We describe desiderata that help navigate the space of possible assumptions about agent responses, and we then propose the noisy response model. Inspired by smoothed analysis and empirical observations, noisy response incorporates imperfection in the agent responses, which we show mitigates the limitations of standard microfoundations. 
Our model retains analytical tractability, leads to more robust insights about stable points, and imposes a lower social burden at optimality.", "bibtex": "@InProceedings{pmlr-v139-jagadeesan21a,\n title = \t {Alternative Microfoundations for Strategic Classification},\n author = {Jagadeesan, Meena and Mendler-D{\\\"u}nner, Celestine and Hardt, Moritz},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4687--4697},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jagadeesan21a/jagadeesan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jagadeesan21a.html},\n abstract = \t {When reasoning about strategic behavior in a machine learning context it is tempting to combine standard microfoundations of rational agents with the statistical decision theory underlying classification. In this work, we argue that a direct combination of these ingredients leads to brittle solution concepts of limited descriptive and prescriptive value. First, we show that rational agents with perfect information produce discontinuities in the aggregate response to a decision rule that we often do not observe empirically. Second, when any positive fraction of agents is not perfectly strategic, desirable stable points\u2014where the classifier is optimal for the data it entails\u2014no longer exist. Third, optimal decision rules under standard microfoundations maximize a measure of negative externality known as social burden within a broad class of assumptions about agent behavior. Recognizing these limitations we explore alternatives to standard microfoundations for binary classification. We describe desiderata that help navigate the space of possible assumptions about agent responses, and we then propose the noisy response model. Inspired by smoothed analysis and empirical observations, noisy response incorporates imperfection in the agent responses, which we show mitigates the limitations of standard microfoundations. 
Our model retains analytical tractability, leads to more robust insights about stable points, and imposes a lower social burden at optimality.}\n}", "pdf": "http://proceedings.mlr.press/v139/jagadeesan21a/jagadeesan21a.pdf", "supp": "", "pdf_size": 4529554, "gs_citation": 58, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15075179842642456998&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "University of California, Berkeley; University of California, Berkeley; University of California, Berkeley", "aff_domain": "berkeley.edu; ; ", "email": "berkeley.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/jagadeesan21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Amortized Conditional Normalized Maximum Likelihood: Reliable Out of Distribution Uncertainty Estimation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9305", "id": "9305", "proceeding": "http://proceedings.mlr.press/v139/zhou21b.html", "slides": "/media/icml-2021/Slides/9305.pdf", "author_site": "Aurick Zhou, Sergey Levine", "author": "Aurick Zhou; Sergey Levine", "abstract": "While deep neural networks provide good performance for a range of challenging tasks, calibration and uncertainty estimation remain major challenges, especially under distribution shift. In this paper, we propose the amortized conditional normalized maximum likelihood (ACNML) method as a scalable general-purpose approach for uncertainty estimation, calibration, and out-of-distribution robustness with deep networks. Our algorithm builds on the conditional normalized maximum likelihood (CNML) coding scheme, which has minimax optimal properties according to the minimum description length principle, but is computationally intractable to evaluate exactly for all but the simplest of model classes. We propose to use approximate Bayesian inference techniques to produce a tractable approximation to the CNML distribution. Our approach can be combined with any approximate inference algorithm that provides tractable posterior densities over model parameters. We demonstrate that ACNML compares favorably to a number of prior techniques for uncertainty estimation in terms of calibration when faced with distribution shift.", "bibtex": "@InProceedings{pmlr-v139-zhou21b,\n title = \t {Amortized Conditional Normalized Maximum Likelihood: Reliable Out of Distribution Uncertainty Estimation},\n author = {Zhou, Aurick and Levine, Sergey},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12803--12812},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhou21b/zhou21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhou21b.html},\n abstract = \t {While deep neural networks provide good performance for a range of challenging tasks, calibration and uncertainty estimation remain major challenges, especially under distribution shift.
In this paper, we propose the amortized conditional normalized maximum likelihood (ACNML) method as a scalable general-purpose approach for uncertainty estimation, calibration, and out-of-distribution robustness with deep networks. Our algorithm builds on the conditional normalized maximum likelihood (CNML) coding scheme, which has minimax optimal properties according to the minimum description length principle, but is computationally intractable to evaluate exactly for all but the simplest of model classes. We propose to use approximate Bayesian inference techniques to produce a tractable approximation to the CNML distribution. Our approach can be combined with any approximate inference algorithm that provides tractable posterior densities over model parameters. We demonstrate that ACNML compares favorably to a number of prior techniques for uncertainty estimation in terms of calibration when faced with distribution shift.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhou21b/zhou21b.pdf", "supp": "", "pdf_size": 949645, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18428979525932062259&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "EECS, University of California, Berkeley, USA; EECS, University of California, Berkeley, USA", "aff_domain": "berkeley.edu; ", "email": "berkeley.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/zhou21b.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "EECS", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "An Algorithm for Stochastic and Adversarial Bandits with Switching Costs", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9467", "id": "9467", "proceeding": "http://proceedings.mlr.press/v139/rouyer21a.html", "slides": "", "author_site": "Chlo\u00e9 Rouyer, Yevgeny Seldin, Nicol\u00f2 Cesa-Bianchi", "author": "Chlo\u00e9 Rouyer; Yevgeny Seldin; Nicol\u00f2 Cesa-Bianchi", "abstract": "We propose an algorithm for stochastic and adversarial multiarmed bandits with switching costs, where the algorithm pays a price $\\lambda$ every time it switches the arm being played. Our algorithm is based on adaptation of the Tsallis-INF algorithm of Zimmert and Seldin (2021) and requires no prior knowledge of the regime or time horizon. In the oblivious adversarial setting it achieves the minimax optimal regret bound of $ O( (\\lambda K)^{1/3}T^{2/3} + \\sqrt{KT})$, where $T$ is the time horizon and $K$ is the number of arms. In the stochastically constrained adversarial regime, which includes the stochastic regime as a special case, it achieves a regret bound of $O((\\lambda K)^{2/3} T^{1/3} + (\\ln T)\\sum_{i \\neq i^*} \\Delta_i^{-1})$, where $\\Delta_i$ are suboptimality gaps and $i^*$ is the unique optimal arm. In the special case of $\\lambda = 0$ (no switching costs), both bounds are minimax optimal within constants. We also explore variants of the problem, where switching cost is allowed to change over time.
We provide experimental evaluation showing competitiveness of our algorithm with the relevant baselines in the stochastic, stochastically constrained adversarial, and adversarial regimes with fixed switching cost.", "bibtex": "@InProceedings{pmlr-v139-rouyer21a,\n title = \t {An Algorithm for Stochastic and Adversarial Bandits with Switching Costs},\n author = {Rouyer, Chlo{\\'e} and Seldin, Yevgeny and Cesa-Bianchi, Nicol{\\`o}},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9127--9135},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/rouyer21a/rouyer21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/rouyer21a.html},\n abstract = \t {We propose an algorithm for stochastic and adversarial multiarmed bandits with switching costs, where the algorithm pays a price $\\lambda$ every time it switches the arm being played. Our algorithm is based on adaptation of the Tsallis-INF algorithm of Zimmert and Seldin (2021) and requires no prior knowledge of the regime or time horizon. In the oblivious adversarial setting it achieves the minimax optimal regret bound of $ O( (\\lambda K)^{1/3}T^{2/3} + \\sqrt{KT})$, where $T$ is the time horizon and $K$ is the number of arms. In the stochastically constrained adversarial regime, which includes the stochastic regime as a special case, it achieves a regret bound of $O((\\lambda K)^{2/3} T^{1/3} + (\\ln T)\\sum_{i \\neq i^*} \\Delta_i^{-1})$, where $\\Delta_i$ are suboptimality gaps and $i^*$ is the unique optimal arm. In the special case of $\\lambda = 0$ (no switching costs), both bounds are minimax optimal within constants. We also explore variants of the problem, where switching cost is allowed to change over time. We provide experimental evaluation showing competitiveness of our algorithm with the relevant baselines in the stochastic, stochastically constrained adversarial, and adversarial regimes with fixed switching cost.}\n}", "pdf": "http://proceedings.mlr.press/v139/rouyer21a/rouyer21a.pdf", "supp": "", "pdf_size": 439756, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14087563024352571022&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, University of Copenhagen, Denmark; Department of Computer Science, University of Copenhagen, Denmark; DSRC & Dept. of Computer Science, Universit\u00e0 degli Studi di Milano, Milano, Italy", "aff_domain": "di.ku.dk; ; ", "email": "di.ku.dk; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/rouyer21a.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of Copenhagen;Universit\u00e0 degli Studi di Milano", "aff_unique_dep": "Department of Computer Science;Dept.
of Computer Science", "aff_unique_url": "https://www.ku.dk;https://www.unimi.it", "aff_unique_abbr": "UCPH;UniMi", "aff_campus_unique_index": "1", "aff_campus_unique": ";Milano", "aff_country_unique_index": "0;0;1", "aff_country_unique": "Denmark;Italy" }, { "title": "An End-to-End Framework for Molecular Conformation Generation via Bilevel Programming", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8721", "id": "8721", "proceeding": "http://proceedings.mlr.press/v139/xu21f.html", "slides": "/media/icml-2021/Slides/8721.pdf", "author_site": "Minkai Xu, Wujie Wang, Shitong Luo, Chence Shi, Yoshua Bengio, Rafael Gomez-Bombarelli, Jian Tang", "author": "Minkai Xu; Wujie Wang; Shitong Luo; Chence Shi; Yoshua Bengio; Rafael Gomez-Bombarelli; Jian Tang", "abstract": "Predicting molecular conformations (or 3D structures) from molecular graphs is a fundamental problem in many applications. Most existing approaches are usually divided into two steps by first predicting the distances between atoms and then generating a 3D structure through optimizing a distance geometry problem. However, the distances predicted with such two-stage approaches may not be able to consistently preserve the geometry of local atomic neighborhoods, making the generated structures unsatisfying. In this paper, we propose an end-to-end solution for molecular conformation prediction called ConfVAE based on the conditional variational autoencoder framework. Specifically, the molecular graph is first encoded in a latent space, and then the 3D structures are generated by solving a principled bilevel optimization program. Extensive experiments on several benchmark data sets prove the effectiveness of our proposed approach over existing state-of-the-art approaches. Code is available at \\url{https://github.com/MinkaiXu/ConfVAE-ICML21}.", "bibtex": "@InProceedings{pmlr-v139-xu21f,\n title = \t {An End-to-End Framework for Molecular Conformation Generation via Bilevel Programming},\n author = {Xu, Minkai and Wang, Wujie and Luo, Shitong and Shi, Chence and Bengio, Yoshua and Gomez-Bombarelli, Rafael and Tang, Jian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11537--11547},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xu21f/xu21f.pdf},\n url = \t {https://proceedings.mlr.press/v139/xu21f.html},\n abstract = \t {Predicting molecular conformations (or 3D structures) from molecular graphs is a fundamental problem in many applications. Most existing approaches are usually divided into two steps by first predicting the distances between atoms and then generating a 3D structure through optimizing a distance geometry problem. However, the distances predicted with such two-stage approaches may not be able to consistently preserve the geometry of local atomic neighborhoods, making the generated structures unsatisfying. In this paper, we propose an end-to-end solution for molecular conformation prediction called ConfVAE based on the conditional variational autoencoder framework. Specifically, the molecular graph is first encoded in a latent space, and then the 3D structures are generated by solving a principled bilevel optimization program. 
Extensive experiments on several benchmark data sets prove the effectiveness of our proposed approach over existing state-of-the-art approaches. Code is available at \\url{https://github.com/MinkaiXu/ConfVAE-ICML21}.}\n}", "pdf": "http://proceedings.mlr.press/v139/xu21f/xu21f.pdf", "supp": "", "pdf_size": 3582460, "gs_citation": 106, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=914718927564575831&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Mila - Qu\u00e9bec AI Institute, Canada+Universit\u00e9 de Montr\u00e9al, Canada+Canadian Institute for Advanced Research (CIFAR), Canada+HEC Montr\u00e9al, Canada; Massachusetts Institute of Technology, USA; Peking University, China; Mila - Qu\u00e9bec AI Institute, Canada+Universit\u00e9 de Montr\u00e9al, Canada+Canadian Institute for Advanced Research (CIFAR), Canada; Mila - Qu\u00e9bec AI Institute, Canada+Universit\u00e9 de Montr\u00e9al, Canada+Canadian Institute for Advanced Research (CIFAR), Canada+HEC Montr\u00e9al, Canada; Massachusetts Institute of Technology, USA; Mila - Qu\u00e9bec AI Institute, Canada+Universit\u00e9 de Montr\u00e9al, Canada+Canadian Institute for Advanced Research (CIFAR), Canada+HEC Montr\u00e9al, Canada", "aff_domain": "umontreal.ca; ; ; ; ; ; ", "email": "umontreal.ca; ; ; ; ; ; ", "github": "https://github.com/MinkaiXu/ConfVAE-ICML21", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/xu21f.html", "aff_unique_index": "0+1+2+3;4;5;0+1+2;0+1+2+3;4;0+1+2+3", "aff_unique_norm": "Mila - Quebec AI Institute;Universit\u00e9 de Montr\u00e9al;Canadian Institute for Advanced Research;HEC Montr\u00e9al;Massachusetts Institute of Technology;Peking University", "aff_unique_dep": "AI Institute;;;;;", "aff_unique_url": "https://mila.quebec;https://www.umontreal.ca;https://www.cifar.ca;https://www.hec.ca;https://web.mit.edu;http://www.pku.edu.cn", "aff_unique_abbr": "Mila;UdeM;CIFAR;HEC;MIT;Peking U", "aff_campus_unique_index": ";;;", "aff_campus_unique": "", "aff_country_unique_index": "0+0+0+0;1;2;0+0+0;0+0+0+0;1;0+0+0+0", "aff_country_unique": "Canada;United States;China" }, { "title": "An Identifiable Double VAE For Disentangled Representations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10249", "id": "10249", "proceeding": "http://proceedings.mlr.press/v139/mita21a.html", "slides": "", "author_site": "Graziano Mita, Maurizio Filippone, Pietro Michiardi", "author": "Graziano Mita; Maurizio Filippone; Pietro Michiardi", "abstract": "A large part of the literature on learning disentangled representations focuses on variational autoencoders (VAEs). Recent developments demonstrate that disentanglement cannot be obtained in a fully unsupervised setting without inductive biases on models and data. However, Khemakhem et al., AISTATS, 2020 suggest that employing a particular form of factorized prior, conditionally dependent on auxiliary variables complementing input observations, can be one such bias, resulting in an identifiable model with guarantees on disentanglement. Working along this line, we propose a novel VAE-based generative model with theoretical guarantees on identifiability. We obtain our conditional prior over the latents by learning an optimal representation, which imposes an additional strength on their regularization. We also extend our method to semi-supervised settings.
Experimental results indicate superior performance with respect to state-of-the-art approaches, according to several established metrics proposed in the literature on disentanglement.", "bibtex": "@InProceedings{pmlr-v139-mita21a,\n title = \t {An Identifiable Double VAE For Disentangled Representations},\n author = {Mita, Graziano and Filippone, Maurizio and Michiardi, Pietro},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7769--7779},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/mita21a/mita21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/mita21a.html},\n abstract = \t {A large part of the literature on learning disentangled representations focuses on variational autoencoders (VAEs). Recent developments demonstrate that disentanglement cannot be obtained in a fully unsupervised setting without inductive biases on models and data. However, Khemakhem et al., AISTATS, 2020 suggest that employing a particular form of factorized prior, conditionally dependent on auxiliary variables complementing input observations, can be one such bias, resulting in an identifiable model with guarantees on disentanglement. Working along this line, we propose a novel VAE-based generative model with theoretical guarantees on identifiability. We obtain our conditional prior over the latents by learning an optimal representation, which imposes an additional strength on their regularization. We also extend our method to semi-supervised settings. Experimental results indicate superior performance with respect to state-of-the-art approaches, according to several established metrics proposed in the literature on disentanglement.}\n}", "pdf": "http://proceedings.mlr.press/v139/mita21a/mita21a.pdf", "supp": "", "pdf_size": 395410, "gs_citation": 50, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7947563744074589507&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "EURECOM, 06410 Biot (France)+SAP Labs France, 06250 Mougins (France); EURECOM, 06410 Biot (France); EURECOM, 06410 Biot (France)", "aff_domain": "eurecom.fr;eurecom.fr;eurecom.fr", "email": "eurecom.fr;eurecom.fr;eurecom.fr", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/mita21a.html", "aff_unique_index": "0+1;0;0", "aff_unique_norm": "EURECOM;SAP Labs France", "aff_unique_dep": ";", "aff_unique_url": "https://www.eurecom.fr;https://labs.sap/", "aff_unique_abbr": ";SAP Labs", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "France" }, { "title": "An Information-Geometric Distance on the Space of Tasks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10479", "id": "10479", "proceeding": "http://proceedings.mlr.press/v139/gao21a.html", "slides": "/media/icml-2021/Slides/10479.pdf", "author_site": "Yansong Gao, Pratik Chaudhari", "author": "Yansong Gao; Pratik Chaudhari", "abstract": "This paper prescribes a distance between learning tasks modeled as joint distributions on data and labels. Using tools in information geometry, the distance is defined to be the length of the shortest weight trajectory on a Riemannian manifold as a classifier is fitted on an interpolated task. 
The interpolated task evolves from the source to the target task using an optimal transport formulation. This distance, which we call the \"coupled transfer distance\" can be compared across different classifier architectures. We develop an algorithm to compute the distance which iteratively transports the marginal on the data of the source task to that of the target task while updating the weights of the classifier to track this evolving data distribution. We develop theory to show that our distance captures the intuitive idea that a good transfer trajectory is the one that keeps the generalization gap small during transfer, in particular at the end on the target task. We perform thorough empirical validation and analysis across diverse image classification datasets to show that the coupled transfer distance correlates strongly with the difficulty of fine-tuning.", "bibtex": "@InProceedings{pmlr-v139-gao21a,\n title = \t {An Information-Geometric Distance on the Space of Tasks},\n author = {Gao, Yansong and Chaudhari, Pratik},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3553--3563},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gao21a/gao21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/gao21a.html},\n abstract = \t {This paper prescribes a distance between learning tasks modeled as joint distributions on data and labels. Using tools in information geometry, the distance is defined to be the length of the shortest weight trajectory on a Riemannian manifold as a classifier is fitted on an interpolated task. The interpolated task evolves from the source to the target task using an optimal transport formulation. This distance, which we call the \"coupled transfer distance\" can be compared across different classifier architectures. We develop an algorithm to compute the distance which iteratively transports the marginal on the data of the source task to that of the target task while updating the weights of the classifier to track this evolving data distribution. We develop theory to show that our distance captures the intuitive idea that a good transfer trajectory is the one that keeps the generalization gap small during transfer, in particular at the end on the target task. 
We perform thorough empirical validation and analysis across diverse image classification datasets to show that the coupled transfer distance correlates strongly with the difficulty of fine-tuning.}\n}", "pdf": "http://proceedings.mlr.press/v139/gao21a/gao21a.pdf", "supp": "", "pdf_size": 6392674, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7919912457553501173&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Applied Mathematics and Computational Science, University of Pennsylvania; Department of Electrical and Systems Engineering, University of Pennsylvania", "aff_domain": "sas.upenn.edu;seas.upenn.edu", "email": "sas.upenn.edu;seas.upenn.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/gao21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Pennsylvania", "aff_unique_dep": "Department of Applied Mathematics and Computational Science", "aff_unique_url": "https://www.upenn.edu", "aff_unique_abbr": "UPenn", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "An Integer Linear Programming Framework for Mining Constraints from Data", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9731", "id": "9731", "proceeding": "http://proceedings.mlr.press/v139/meng21a.html", "slides": "", "author_site": "Tao Meng, Kai-Wei Chang", "author": "Tao Meng; Kai-Wei Chang", "abstract": "Structured output prediction problems (e.g., sequential tagging, hierarchical multi-class classification) often involve constraints over the output space. These constraints interact with the learned models to filter infeasible solutions and facilitate in building an accountable system. However, despite constraints are useful, they are often based on hand-crafted rules. This raises a question \u2013 can we mine constraints and rules from data based on a learning algorithm? In this paper, we present a general framework for mining constraints from data. In particular, we consider the inference in structured output prediction as an integer linear programming (ILP) problem. Then, given the coefficients of the objective function and the corresponding solution, we mine the underlying constraints by estimating the outer and inner polytopes of the feasible set. We verify the proposed constraint mining algorithm in various synthetic and real-world applications and demonstrate that the proposed approach successfully identifies the feasible set at scale. In particular, we show that our approach can learn to solve 9x9 Sudoku puzzles and minimal spanning tree problems from examples without providing the underlying rules. Our algorithm can also integrate with a neural network model to learn the hierarchical label structure of a multi-label classification task. 
Besides, we provide theoretical analysis about the tightness of the polytopes and the reliability of the mined constraints.", "bibtex": "@InProceedings{pmlr-v139-meng21a,\n title = \t {An Integer Linear Programming Framework for Mining Constraints from Data},\n author = {Meng, Tao and Chang, Kai-Wei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7619--7631},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/meng21a/meng21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/meng21a.html},\n abstract = \t {Structured output prediction problems (e.g., sequential tagging, hierarchical multi-class classification) often involve constraints over the output space. These constraints interact with the learned models to filter infeasible solutions and facilitate in building an accountable system. However, despite constraints are useful, they are often based on hand-crafted rules. This raises a question \u2013 can we mine constraints and rules from data based on a learning algorithm? In this paper, we present a general framework for mining constraints from data. In particular, we consider the inference in structured output prediction as an integer linear programming (ILP) problem. Then, given the coefficients of the objective function and the corresponding solution, we mine the underlying constraints by estimating the outer and inner polytopes of the feasible set. We verify the proposed constraint mining algorithm in various synthetic and real-world applications and demonstrate that the proposed approach successfully identifies the feasible set at scale. In particular, we show that our approach can learn to solve 9x9 Sudoku puzzles and minimal spanning tree problems from examples without providing the underlying rules. Our algorithm can also integrate with a neural network model to learn the hierarchical label structure of a multi-label classification task. 
Besides, we provide theoretical analysis about the tightness of the polytopes and the reliability of the mined constraints.}\n}", "pdf": "http://proceedings.mlr.press/v139/meng21a/meng21a.pdf", "supp": "", "pdf_size": 964566, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15134580706124032020&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, University of California, Los Angeles, USA; Department of Computer Science, University of California, Los Angeles, USA", "aff_domain": "cs.ucla.edu;cs.ucla.edu", "email": "cs.ucla.edu;cs.ucla.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/meng21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Los Angeles", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ucla.edu", "aff_unique_abbr": "UCLA", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Los Angeles", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "An exact solver for the Weston-Watkins SVM subproblem", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10195", "id": "10195", "proceeding": "http://proceedings.mlr.press/v139/wang21u.html", "slides": "/media/icml-2021/Slides/10195.pdf", "author_site": "Yutong Wang, Clay Scott", "author": "Yutong Wang; Clayton Scott", "abstract": "Recent empirical evidence suggests that the Weston-Watkins support vector machine is among the best performing multiclass extensions of the binary SVM. Current state-of-the-art solvers repeatedly solve a particular subproblem approximately using an iterative strategy. In this work, we propose an algorithm that solves the subproblem exactly using a novel reparametrization of the Weston-Watkins dual problem. For linear WW-SVMs, our solver shows significant speed-up over the state-of-the-art solver when the number of classes is large. Our exact subproblem solver also allows us to prove linear convergence of the overall solver.", "bibtex": "@InProceedings{pmlr-v139-wang21u,\n title = \t {An exact solver for the Weston-Watkins SVM subproblem},\n author = {Wang, Yutong and Scott, Clayton},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10894--10904},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21u/wang21u.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21u.html},\n abstract = \t {Recent empirical evidence suggests that the Weston-Watkins support vector machine is among the best performing multiclass extensions of the binary SVM. Current state-of-the-art solvers repeatedly solve a particular subproblem approximately using an iterative strategy. In this work, we propose an algorithm that solves the subproblem exactly using a novel reparametrization of the Weston-Watkins dual problem. For linear WW-SVMs, our solver shows significant speed-up over the state-of-the-art solver when the number of classes is large. 
Our exact subproblem solver also allows us to prove linear convergence of the overall solver.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21u/wang21u.pdf", "supp": "", "pdf_size": 478945, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3159763216882198120&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Electrical Engineering and Computer Science, University of Michigan + Department of Statistics, University of Michigan; Department of Electrical Engineering and Computer Science, University of Michigan + Department of Statistics, University of Michigan", "aff_domain": "umich.edu;umich.edu", "email": "umich.edu;umich.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/wang21u.html", "aff_unique_index": "0+0;0+0", "aff_unique_norm": "University of Michigan", "aff_unique_dep": "Department of Electrical Engineering and Computer Science", "aff_unique_url": "https://www.umich.edu", "aff_unique_abbr": "UM", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Ann Arbor", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "United States" }, { "title": "Analysis of stochastic Lanczos quadrature for spectrum approximation", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9115", "id": "9115", "proceeding": "http://proceedings.mlr.press/v139/chen21s.html", "slides": "/media/icml-2021/Slides/9115.pdf", "author_site": "Tyler Chen, Thomas Trogdon, Shashanka Ubaru", "author": "Tyler Chen; Thomas Trogdon; Shashanka Ubaru", "abstract": "The cumulative empirical spectral measure (CESM) $\\Phi[\\mathbf{A}] : \\mathbb{R} \\to [0,1]$ of a $n\\times n$ symmetric matrix $\\mathbf{A}$ is defined as the fraction of eigenvalues of $\\mathbf{A}$ less than a given threshold, i.e., $\\Phi[\\mathbf{A}](x) := \\sum_{i=1}^{n} \\frac{1}{n} {\\large\\unicode{x1D7D9}}[ \\lambda_i[\\mathbf{A}]\\leq x]$. Spectral sums $\\operatorname{tr}(f[\\mathbf{A}])$ can be computed as the Riemann\u2013Stieltjes integral of $f$ against $\\Phi[\\mathbf{A}]$, so the task of estimating CESM arises frequently in a number of applications, including machine learning. We present an error analysis for stochastic Lanczos quadrature (SLQ). We show that SLQ obtains an approximation to the CESM within a Wasserstein distance of $t \\: | \\lambda_{\\text{max}}[\\mathbf{A}] - \\lambda_{\\text{min}}[\\mathbf{A}] |$ with probability at least $1-\\eta$, by applying the Lanczos algorithm for $\\lceil 12 t^{-1} + \\frac{1}{2} \\rceil$ iterations to $\\lceil 4 ( n+2 )^{-1}t^{-2} \\ln(2n\\eta^{-1}) \\rceil$ vectors sampled independently and uniformly from the unit sphere. We additionally provide (matrix-dependent) a posteriori error bounds for the Wasserstein and Kolmogorov\u2013Smirnov distances between the output of this algorithm and the true CESM. 
The quality of our bounds is demonstrated using numerical experiments.", "bibtex": "@InProceedings{pmlr-v139-chen21s,\n title = \t {Analysis of stochastic Lanczos quadrature for spectrum approximation},\n author = {Chen, Tyler and Trogdon, Thomas and Ubaru, Shashanka},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1728--1739},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21s/chen21s.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21s.html},\n abstract = \t {The cumulative empirical spectral measure (CESM) $\\Phi[\\mathbf{A}] : \\mathbb{R} \\to [0,1]$ of a $n\\times n$ symmetric matrix $\\mathbf{A}$ is defined as the fraction of eigenvalues of $\\mathbf{A}$ less than a given threshold, i.e., $\\Phi[\\mathbf{A}](x) := \\sum_{i=1}^{n} \\frac{1}{n} {\\large\\unicode{x1D7D9}}[ \\lambda_i[\\mathbf{A}]\\leq x]$. Spectral sums $\\operatorname{tr}(f[\\mathbf{A}])$ can be computed as the Riemann\u2013Stieltjes integral of $f$ against $\\Phi[\\mathbf{A}]$, so the task of estimating CESM arises frequently in a number of applications, including machine learning. We present an error analysis for stochastic Lanczos quadrature (SLQ). We show that SLQ obtains an approximation to the CESM within a Wasserstein distance of $t \\: | \\lambda_{\\text{max}}[\\mathbf{A}] - \\lambda_{\\text{min}}[\\mathbf{A}] |$ with probability at least $1-\\eta$, by applying the Lanczos algorithm for $\\lceil 12 t^{-1} + \\frac{1}{2} \\rceil$ iterations to $\\lceil 4 ( n+2 )^{-1}t^{-2} \\ln(2n\\eta^{-1}) \\rceil$ vectors sampled independently and uniformly from the unit sphere. We additionally provide (matrix-dependent) a posteriori error bounds for the Wasserstein and Kolmogorov\u2013Smirnov distances between the output of this algorithm and the true CESM. The quality of our bounds is demonstrated using numerical experiments.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21s/chen21s.pdf", "supp": "", "pdf_size": 895775, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3718766219336547017&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 6, "aff": "Department of Applied Mathematics, University of Washington, Seattle, Washington, USA; Department of Applied Mathematics, University of Washington, Seattle, Washington, USA; IBM T.J. Watson Research Center, Yorktown Heights, New York, USA", "aff_domain": "uw.edu; ; ", "email": "uw.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/chen21s.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of Washington;IBM", "aff_unique_dep": "Department of Applied Mathematics;IBM T.J. 
Watson Research Center", "aff_unique_url": "https://www.washington.edu;https://www.ibm.com/research/watson", "aff_unique_abbr": "UW;IBM Watson", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Seattle;Yorktown Heights", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Analyzing the tree-layer structure of Deep Forests", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9653", "id": "9653", "proceeding": "http://proceedings.mlr.press/v139/arnould21a.html", "slides": "", "author_site": "Ludovic Arnould, Claire Boyer, Erwan Scornet", "author": "Ludovic Arnould; Claire Boyer; Erwan Scornet", "abstract": "Random forests on the one hand, and neural networks on the other hand, have met great success in the machine learning community for their predictive performance. Combinations of both have been proposed in the literature, notably leading to the so-called deep forests (DF) (Zhou & Feng,2019). In this paper, our aim is not to benchmark DF performances but to investigate instead their underlying mechanisms. Additionally, DF architecture can be generally simplified into more simple and computationally efficient shallow forest networks. Despite some instability, the latter may outperform standard predictive tree-based methods. We exhibit a theoretical framework in which a shallow tree network is shown to enhance the performance of classical decision trees. In such a setting, we provide tight theoretical lower and upper bounds on its excess risk. These theoretical results show the interest of tree-network architectures for well-structured data provided that the first layer, acting as a data encoder, is rich enough.", "bibtex": "@InProceedings{pmlr-v139-arnould21a,\n title = \t {Analyzing the tree-layer structure of Deep Forests},\n author = {Arnould, Ludovic and Boyer, Claire and Scornet, Erwan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {342--350},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/arnould21a/arnould21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/arnould21a.html},\n abstract = \t {Random forests on the one hand, and neural networks on the other hand, have met great success in the machine learning community for their predictive performance. Combinations of both have been proposed in the literature, notably leading to the so-called deep forests (DF) (Zhou & Feng,2019). In this paper, our aim is not to benchmark DF performances but to investigate instead their underlying mechanisms. Additionally, DF architecture can be generally simplified into more simple and computationally efficient shallow forest networks. Despite some instability, the latter may outperform standard predictive tree-based methods. We exhibit a theoretical framework in which a shallow tree network is shown to enhance the performance of classical decision trees. In such a setting, we provide tight theoretical lower and upper bounds on its excess risk. 
These theoretical results show the interest of tree-network architectures for well-structured data provided that the first layer, acting as a data encoder, is rich enough.}\n}", "pdf": "http://proceedings.mlr.press/v139/arnould21a/arnould21a.pdf", "supp": "", "pdf_size": 4713782, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17939505627227567021&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/arnould21a.html" }, { "title": "Annealed Flow Transport Monte Carlo", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10537", "id": "10537", "proceeding": "http://proceedings.mlr.press/v139/arbel21a.html", "slides": "", "author_site": "Michael Arbel, Alexander Matthews, Arnaud Doucet", "author": "Michael Arbel; Alex Matthews; Arnaud Doucet", "abstract": "Annealed Importance Sampling (AIS) and its Sequential Monte Carlo (SMC) extensions are state-of-the-art methods for estimating normalizing constants of probability distributions. We propose here a novel Monte Carlo algorithm, Annealed Flow Transport (AFT), that builds upon AIS and SMC and combines them with normalizing flows (NFs) for improved performance. This method transports a set of particles using not only importance sampling (IS), Markov chain Monte Carlo (MCMC) and resampling steps - as in SMC, but also relies on NFs which are learned sequentially to push particles towards the successive annealed targets. We provide limit theorems for the resulting Monte Carlo estimates of the normalizing constant and expectations with respect to the target distribution. Additionally, we show that a continuous-time scaling limit of the population version of AFT is given by a Feynman\u2013Kac measure which simplifies to the law of a controlled diffusion for expressive NFs. We demonstrate experimentally the benefits and limitations of our methodology on a variety of applications.", "bibtex": "@InProceedings{pmlr-v139-arbel21a,\n title = \t {Annealed Flow Transport Monte Carlo},\n author = {Arbel, Michael and Matthews, Alex and Doucet, Arnaud},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {318--330},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/arbel21a/arbel21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/arbel21a.html},\n abstract = \t {Annealed Importance Sampling (AIS) and its Sequential Monte Carlo (SMC) extensions are state-of-the-art methods for estimating normalizing constants of probability distributions. We propose here a novel Monte Carlo algorithm, Annealed Flow Transport (AFT), that builds upon AIS and SMC and combines them with normalizing flows (NFs) for improved performance. This method transports a set of particles using not only importance sampling (IS), Markov chain Monte Carlo (MCMC) and resampling steps - as in SMC, but also relies on NFs which are learned sequentially to push particles towards the successive annealed targets. We provide limit theorems for the resulting Monte Carlo estimates of the normalizing constant and expectations with respect to the target distribution. 
Additionally, we show that a continuous-time scaling limit of the population version of AFT is given by a Feynman\u2013Kac measure which simplifies to the law of a controlled diffusion for expressive NFs. We demonstrate experimentally the benefits and limitations of our methodology on a variety of applications.}\n}", "pdf": "http://proceedings.mlr.press/v139/arbel21a/arbel21a.pdf", "supp": "", "pdf_size": 430272, "gs_citation": 87, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6587859836209119306&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Gatsby Computational Neuroscience Unit, University College London; DeepMind; DeepMind", "aff_domain": "gmail.com;google.com;google.com", "email": "gmail.com;google.com;google.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/arbel21a.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "University College London;DeepMind", "aff_unique_dep": "Gatsby Computational Neuroscience Unit;", "aff_unique_url": "https://www.ucl.ac.uk;https://deepmind.com", "aff_unique_abbr": "UCL;DeepMind", "aff_campus_unique_index": "0", "aff_campus_unique": "London;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Approximate Group Fairness for Clustering", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10725", "id": "10725", "proceeding": "http://proceedings.mlr.press/v139/li21j.html", "slides": "", "author_site": "Bo Li, Lijun Li, Ankang Sun, Chenhao Wang, Yingfan Wang", "author": "Bo Li; Lijun Li; Ankang Sun; Chenhao Wang; Yingfan Wang", "abstract": "We incorporate group fairness into the algorithmic centroid clustering problem, where $k$ centers are to be located to serve $n$ agents distributed in a metric space. We refine the notion of proportional fairness proposed in [Chen et al., ICML 2019] as {\\em core fairness}. A $k$-clustering is in the core if no coalition containing at least $n/k$ agents can strictly decrease their total distance by deviating to a new center together. Our solution concept is motivated by the situation where agents are able to coordinate and utilities are transferable. A string of existence, hardness and approximability results is provided. Particularly, we propose two dimensions to relax core requirements: one is on the degree of distance improvement, and the other is on the size of deviating coalition. For both relaxations and their combination, we study the extent to which relaxed core fairness can be satisfied in metric spaces including line, tree and general metric space, and design approximation algorithms accordingly. 
We also conduct experiments on synthetic and real-world data to examine the performance of our algorithms.", "bibtex": "@InProceedings{pmlr-v139-li21j,\n title = \t {Approximate Group Fairness for Clustering},\n author = {Li, Bo and Li, Lijun and Sun, Ankang and Wang, Chenhao and Wang, Yingfan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6381--6391},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21j/li21j.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21j.html},\n abstract = \t {We incorporate group fairness into the algorithmic centroid clustering problem, where $k$ centers are to be located to serve $n$ agents distributed in a metric space. We refine the notion of proportional fairness proposed in [Chen et al., ICML 2019] as {\\em core fairness}. A $k$-clustering is in the core if no coalition containing at least $n/k$ agents can strictly decrease their total distance by deviating to a new center together. Our solution concept is motivated by the situation where agents are able to coordinate and utilities are transferable. A string of existence, hardness and approximability results is provided. Particularly, we propose two dimensions to relax core requirements: one is on the degree of distance improvement, and the other is on the size of deviating coalition. For both relaxations and their combination, we study the extent to which relaxed core fairness can be satisfied in metric spaces including line, tree and general metric space, and design approximation algorithms accordingly. We also conduct experiments on synthetic and real-world data to examine the performance of our algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21j/li21j.pdf", "supp": "", "pdf_size": 828089, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16691234268373477060&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Department of Computing, The Hong Kong Polytechnic University, Hong Kong, China; School of Mathematical Sciences, Ocean University of China, Qingdao, China; Warwick Business School, University of Warwick, United Kingdom; University of Nebraska-Lincoln, United States; Department of Computer Science, Duke University, United States", "aff_domain": "my.cityu.edu.hk; ; ; ; ", "email": "my.cityu.edu.hk; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/li21j.html", "aff_unique_index": "0;1;2;3;4", "aff_unique_norm": "Hong Kong Polytechnic University;Ocean University of China;University of Warwick;University of Nebraska-Lincoln;Duke University", "aff_unique_dep": "Department of Computing;School of Mathematical Sciences;Warwick Business School;;Department of Computer Science", "aff_unique_url": "https://www.polyu.edu.hk;http://www.ouc.edu.cn;https://www.warwick.ac.uk;https://www.unl.edu;https://www.duke.edu", "aff_unique_abbr": "PolyU;OUC;Warwick;UNL;Duke", "aff_campus_unique_index": "0;1;2", "aff_campus_unique": "Hong Kong;Qingdao;Warwick;", "aff_country_unique_index": "0;0;1;2;2", "aff_country_unique": "China;United Kingdom;United States" }, { "title": "Approximating a Distribution Using Weight Queries", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8457", "id": "8457", "proceeding": 
"http://proceedings.mlr.press/v139/barak21a.html", "slides": "/media/icml-2021/Slides/8457.pdf", "author_site": "Nadav Barak, Sivan Sabato", "author": "Nadav Barak; Sivan Sabato", "abstract": "We consider a novel challenge: approximating a distribution without the ability to randomly sample from that distribution. We study how such an approximation can be obtained using *weight queries*. Given some data set of examples, a weight query presents one of the examples to an oracle, which returns the probability, according to the target distribution, of observing examples similar to the presented example. This oracle can represent, for instance, counting queries to a database of the target population, or an interface to a search engine which returns the number of results that match a given search. We propose an interactive algorithm that iteratively selects data set examples and performs corresponding weight queries. The algorithm finds a reweighting of the data set that approximates the weights according to the target distribution, using a limited number of weight queries. We derive an approximation bound on the total variation distance between the reweighting found by the algorithm and the best achievable reweighting. Our algorithm takes inspiration from the UCB approach common in multi-armed bandits problems, and combines it with a new discrepancy estimator and a greedy iterative procedure. In addition to our theoretical guarantees, we demonstrate in experiments the advantages of the proposed algorithm over several baselines. A python implementation of the proposed algorithm and of all the experiments can be found at https://github.com/Nadav-Barak/AWP.", "bibtex": "@InProceedings{pmlr-v139-barak21a,\n title = \t {Approximating a Distribution Using Weight Queries},\n author = {Barak, Nadav and Sabato, Sivan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {674--683},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/barak21a/barak21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/barak21a.html},\n abstract = \t {We consider a novel challenge: approximating a distribution without the ability to randomly sample from that distribution. We study how such an approximation can be obtained using *weight queries*. Given some data set of examples, a weight query presents one of the examples to an oracle, which returns the probability, according to the target distribution, of observing examples similar to the presented example. This oracle can represent, for instance, counting queries to a database of the target population, or an interface to a search engine which returns the number of results that match a given search. We propose an interactive algorithm that iteratively selects data set examples and performs corresponding weight queries. The algorithm finds a reweighting of the data set that approximates the weights according to the target distribution, using a limited number of weight queries. We derive an approximation bound on the total variation distance between the reweighting found by the algorithm and the best achievable reweighting. Our algorithm takes inspiration from the UCB approach common in multi-armed bandits problems, and combines it with a new discrepancy estimator and a greedy iterative procedure. 
In addition to our theoretical guarantees, we demonstrate in experiments the advantages of the proposed algorithm over several baselines. A python implementation of the proposed algorithm and of all the experiments can be found at https://github.com/Nadav-Barak/AWP.}\n}", "pdf": "http://proceedings.mlr.press/v139/barak21a/barak21a.pdf", "supp": "", "pdf_size": 483041, "gs_citation": 1, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=61075495064226805&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science, Ben Gurion University, Israel; Department of Computer Science, Ben Gurion University, Israel", "aff_domain": "post.bgu.ac.il;cs.bgu.ac.il", "email": "post.bgu.ac.il;cs.bgu.ac.il", "github": "https://github.com/Nadav-Barak/AWP", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/barak21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Ben Gurion University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.bgu.ac.il", "aff_unique_abbr": "BGU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "title": "Approximation Theory Based Methods for RKHS Bandits", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8553", "id": "8553", "proceeding": "http://proceedings.mlr.press/v139/takemori21a.html", "slides": "/media/icml-2021/Slides/8553.pdf", "author_site": "Sho Takemori, Masahiro Sato", "author": "Sho Takemori; Masahiro Sato", "abstract": "The RKHS bandit problem (also called kernelized multi-armed bandit problem) is an online optimization problem of non-linear functions with noisy feedback. Although the problem has been extensively studied, there are unsatisfactory results for some problems compared to the well-studied linear bandit case. Specifically, there is no general algorithm for the adversarial RKHS bandit problem. In addition, high computational complexity of existing algorithms hinders practical application. We address these issues by considering a novel amalgamation of approximation theory and the misspecified linear bandit problem. Using an approximation method, we propose efficient algorithms for the stochastic RKHS bandit problem and the first general algorithm for the adversarial RKHS bandit problem. Furthermore, we empirically show that one of our proposed methods has comparable cumulative regret to IGP-UCB and its running time is much shorter.", "bibtex": "@InProceedings{pmlr-v139-takemori21a,\n title = \t {Approximation Theory Based Methods for RKHS Bandits},\n author = {Takemori, Sho and Sato, Masahiro},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10076--10085},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/takemori21a/takemori21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/takemori21a.html},\n abstract = \t {The RKHS bandit problem (also called kernelized multi-armed bandit problem) is an online optimization problem of non-linear functions with noisy feedback. Although the problem has been extensively studied, there are unsatisfactory results for some problems compared to the well-studied linear bandit case. Specifically, there is no general algorithm for the adversarial RKHS bandit problem. 
In addition, high computational complexity of existing algorithms hinders practical application. We address these issues by considering a novel amalgamation of approximation theory and the misspecified linear bandit problem. Using an approximation method, we propose efficient algorithms for the stochastic RKHS bandit problem and the first general algorithm for the adversarial RKHS bandit problem. Furthermore, we empirically show that one of our proposed methods has comparable cumulative regret to IGP-UCB and its running time is much shorter.}\n}", "pdf": "http://proceedings.mlr.press/v139/takemori21a/takemori21a.pdf", "supp": "", "pdf_size": 720939, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7413479400617912861&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "FUJIFILM Business Innovation, Kanagawa, Japan; FUJIFILM Business Innovation, Kanagawa, Japan", "aff_domain": "fujifilm.com; ", "email": "fujifilm.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/takemori21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "FUJIFILM Business Innovation", "aff_unique_dep": "", "aff_unique_url": "https://www.fujifilm.com/businessinnovation", "aff_unique_abbr": "", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Japan" }, { "title": "Approximation Theory of Convolutional Architectures for Time Series Modelling", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10513", "id": "10513", "proceeding": "http://proceedings.mlr.press/v139/jiang21d.html", "slides": "", "author_site": "Haotian Jiang, Zhong Li, Qianxiao Li", "author": "Haotian Jiang; Zhong Li; Qianxiao Li", "abstract": "We study the approximation properties of convolutional architectures applied to time series modelling, which can be formulated mathematically as a functional approximation problem. In the recurrent setting, recent results reveal an intricate connection between approximation efficiency and memory structures in the data generation process. In this paper, we derive parallel results for convolutional architectures, with WaveNet being a prime example. Our results reveal that in this new setting, approximation efficiency is not only characterised by memory, but also additional fine structures in the target relationship. This leads to a novel definition of spectrum-based regularity that measures the complexity of temporal relationships under the convolutional approximation scheme.
These analyses provide a foundation to understand the differences between architectural choices for time series modelling and can give theoretically grounded guidance for practical applications.", "bibtex": "@InProceedings{pmlr-v139-jiang21d,\n title = \t {Approximation Theory of Convolutional Architectures for Time Series Modelling},\n author = {Jiang, Haotian and Li, Zhong and Li, Qianxiao},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4961--4970},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jiang21d/jiang21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/jiang21d.html},\n abstract = \t {We study the approximation properties of convolutional architectures applied to time series modelling, which can be formulated mathematically as a functional approximation problem. In the recurrent setting, recent results reveal an intricate connection between approximation efficiency and memory structures in the data generation process. In this paper, we derive parallel results for convolutional architectures, with WaveNet being a prime example. Our results reveal that in this new setting, approximation efficiency is not only characterised by memory, but also additional fine structures in the target relationship. This leads to a novel definition of spectrum-based regularity that measures the complexity of temporal relationships under the convolutional approximation scheme. These analyses provide a foundation to understand the differences between architectural choices for time series modelling and can give theoretically grounded guidance for practical applications.}\n}", "pdf": "http://proceedings.mlr.press/v139/jiang21d/jiang21d.pdf", "supp": "", "pdf_size": 497321, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14899377893097120435&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Mathematics, National University of Singapore; School of Mathematical Science, Peking University; Institute of High Performance Computing, A*STAR, Singapore", "aff_domain": "nus.edu.sg;pku.edu.cn;nus.edu.sg", "email": "nus.edu.sg;pku.edu.cn;nus.edu.sg", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/jiang21d.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "National University of Singapore;Peking University;A*STAR Institute of High Performance Computing", "aff_unique_dep": "Department of Mathematics;School of Mathematical Science;Institute of High Performance Computing", "aff_unique_url": "https://www.nus.edu.sg;http://www.pku.edu.cn;https://www.ihpc.a-star.edu.sg", "aff_unique_abbr": "NUS;PKU;IHPC", "aff_campus_unique_index": "1", "aff_campus_unique": ";Beijing", "aff_country_unique_index": "0;1;0", "aff_country_unique": "Singapore;China" }, { "title": "Asymmetric Heavy Tails and Implicit Bias in Gaussian Noise Injections", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9019", "id": "9019", "proceeding": "http://proceedings.mlr.press/v139/camuto21a.html", "slides": "", "author_site": "Alexander D Camuto, Xiaoyu Wang, Lingjiong Zhu, Christopher Holmes, Mert Gurbuzbalaban, Umut Simsekli", "author": "Alexander Camuto; Xiaoyu Wang; Lingjiong Zhu; Chris Holmes; Mert Gurbuzbalaban; Umut Simsekli", "abstract": "Gaussian noise 
injections (GNIs) are a family of simple and widely-used regularisation methods for training neural networks, where one injects additive or multiplicative Gaussian noise to the network activations at every iteration of the optimisation algorithm, which is typically chosen as stochastic gradient descent (SGD). In this paper, we focus on the so-called \u2018implicit effect\u2019 of GNIs, which is the effect of the injected noise on the dynamics of SGD. We show that this effect induces an \\emph{asymmetric heavy-tailed noise} on SGD gradient updates. In order to model this modified dynamics, we first develop a Langevin-like stochastic differential equation that is driven by a general family of \\emph{asymmetric} heavy-tailed noise. Using this model we then formally prove that GNIs induce an \u2018implicit bias\u2019, which varies depending on the heaviness of the tails and the level of asymmetry. Our empirical results confirm that different types of neural networks trained with GNIs are well-modelled by the proposed dynamics and that the implicit effect of these injections induces a bias that degrades the performance of networks.", "bibtex": "@InProceedings{pmlr-v139-camuto21a,\n title = \t {Asymmetric Heavy Tails and Implicit Bias in Gaussian Noise Injections},\n author = {Camuto, Alexander and Wang, Xiaoyu and Zhu, Lingjiong and Holmes, Chris and Gurbuzbalaban, Mert and Simsekli, Umut},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1249--1260},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/camuto21a/camuto21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/camuto21a.html},\n abstract = \t {Gaussian noise injections (GNIs) are a family of simple and widely-used regularisation methods for training neural networks, where one injects additive or multiplicative Gaussian noise to the network activations at every iteration of the optimisation algorithm, which is typically chosen as stochastic gradient descent (SGD). In this paper, we focus on the so-called \u2018implicit effect\u2019 of GNIs, which is the effect of the injected noise on the dynamics of SGD. We show that this effect induces an \\emph{asymmetric heavy-tailed noise} on SGD gradient updates. In order to model this modified dynamics, we first develop a Langevin-like stochastic differential equation that is driven by a general family of \\emph{asymmetric} heavy-tailed noise. Using this model we then formally prove that GNIs induce an \u2018implicit bias\u2019, which varies depending on the heaviness of the tails and the level of asymmetry. 
Our empirical results confirm that different types of neural networks trained with GNIs are well-modelled by the proposed dynamics and that the implicit effect of these injections induces a bias that degrades the performance of networks.}\n}", "pdf": "http://proceedings.mlr.press/v139/camuto21a/camuto21a.pdf", "supp": "", "pdf_size": 1891149, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6154175937826979347&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": ";;;;;", "aff_domain": ";;;;;", "email": ";;;;;", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/camuto21a.html" }, { "title": "Asymmetric Loss Functions for Learning with Noisy Labels", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8595", "id": "8595", "proceeding": "http://proceedings.mlr.press/v139/zhou21f.html", "slides": "/media/icml-2021/Slides/8595.pdf", "author_site": "Xiong Zhou, Xianming Liu, Junjun Jiang, Xin Gao, Xiangyang Ji", "author": "Xiong Zhou; Xianming Liu; Junjun Jiang; Xin Gao; Xiangyang Ji", "abstract": "Robust loss functions are essential for training deep neural networks with better generalization power in the presence of noisy labels. Symmetric loss functions are confirmed to be robust to label noise. However, the symmetric condition is overly restrictive. In this work, we propose a new class of loss functions, namely asymmetric loss functions, which are robust to learning from noisy labels for arbitrary noise type. Subsequently, we investigate general theoretical properties of asymmetric loss functions, including classification-calibration, excess risk bound, and noise-tolerance. Meanwhile, we introduce the asymmetry ratio to measure the asymmetry of a loss function, and the empirical results show that a higher ratio will provide better robustness. Moreover, we modify several common loss functions, and establish the necessary and sufficient conditions for them to be asymmetric. Experiments on benchmark datasets demonstrate that asymmetric loss functions can outperform state-of-the-art methods.", "bibtex": "@InProceedings{pmlr-v139-zhou21f,\n title = \t {Asymmetric Loss Functions for Learning with Noisy Labels},\n author = {Zhou, Xiong and Liu, Xianming and Jiang, Junjun and Gao, Xin and Ji, Xiangyang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12846--12856},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhou21f/zhou21f.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhou21f.html},\n abstract = \t {Robust loss functions are essential for training deep neural networks with better generalization power in the presence of noisy labels. Symmetric loss functions are confirmed to be robust to label noise. However, the symmetric condition is overly restrictive. In this work, we propose a new class of loss functions, namely asymmetric loss functions, which are robust to learning from noisy labels for arbitrary noise type. Subsequently, we investigate general theoretical properties of asymmetric loss functions, including classification-calibration, excess risk bound, and noise-tolerance. 
Meanwhile, we introduce the asymmetry ratio to measure the asymmetry of a loss function, and the empirical results show that a higher ratio will provide better robustness. Moreover, we modify several common loss functions, and establish the necessary and sufficient conditions for them to be asymmetric. Experiments on benchmark datasets demonstrate that asymmetric loss functions can outperform state-of-the-art methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhou21f/zhou21f.pdf", "supp": "", "pdf_size": 5242024, "gs_citation": 84, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=425870196210326248&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Harbin Institute of Technology+Peng Cheng Laboratory; Harbin Institute of Technology+Peng Cheng Laboratory; Harbin Institute of Technology+Peng Cheng Laboratory; King Abdullah University of Science and Technology+Peng Cheng Laboratory; Tsinghua University", "aff_domain": "hit.edu.cn;hit.edu.cn;hit.edu.cn;kaust.edu.sa;tsinghua.edu.cn", "email": "hit.edu.cn;hit.edu.cn;hit.edu.cn;kaust.edu.sa;tsinghua.edu.cn", "github": "https://github.com/hitcszx/ALFs", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/zhou21f.html", "aff_unique_index": "0+1;0+1;0+1;2+1;3", "aff_unique_norm": "Harbin Institute of Technology;Pengcheng Laboratory;King Abdullah University of Science and Technology;Tsinghua University", "aff_unique_dep": ";Peng Cheng Laboratory;;", "aff_unique_url": "http://www.hit.edu.cn/;http://www.pcl.ac.cn;https://www.kaust.edu.sa;https://www.tsinghua.edu.cn", "aff_unique_abbr": "HIT;PCL;KAUST;THU", "aff_campus_unique_index": "0;0;0;", "aff_campus_unique": "Harbin;", "aff_country_unique_index": "0+0;0+0;0+0;1+0;0", "aff_country_unique": "China;Saudi Arabia" }, { "title": "Asymptotic Normality and Confidence Intervals for Prediction Risk of the Min-Norm Least Squares Estimator", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8631", "id": "8631", "proceeding": "http://proceedings.mlr.press/v139/li21x.html", "slides": "", "author_site": "Zeng Li, Chuanlong Xie, Qinwen Wang", "author": "Zeng Li; Chuanlong Xie; Qinwen Wang", "abstract": "This paper quantifies the uncertainty of prediction risk for the min-norm least squares estimator in high-dimensional linear regression models. We establish the asymptotic normality of prediction risk when both the sample size and the number of features tend to infinity. Based on the newly established central limit theorems(CLTs), we derive the confidence intervals of the prediction risk under various scenarios. Our results demonstrate the sample-wise non-monotonicity of the prediction risk and confirm \u201cmore data hurt\" phenomenon.
Furthermore, the width of confidence intervals indicates that over-parameterization would enlarge the randomness of prediction performance.", "bibtex": "@InProceedings{pmlr-v139-li21x,\n title = \t {Asymptotic Normality and Confidence Intervals for Prediction Risk of the Min-Norm Least Squares Estimator},\n author = {Li, Zeng and Xie, Chuanlong and Wang, Qinwen},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6533--6542},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21x/li21x.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21x.html},\n abstract = \t {This paper quantifies the uncertainty of prediction risk for the min-norm least squares estimator in high-dimensional linear regression models. We establish the asymptotic normality of prediction risk when both the sample size and the number of features tend to infinity. Based on the newly established central limit theorems(CLTs), we derive the confidence intervals of the prediction risk under various scenarios. Our results demonstrate the sample-wise non-monotonicity of the prediction risk and confirm \u201cmore data hurt\" phenomenon. Furthermore, the width of confidence intervals indicates that over-parameterization would enlarge the randomness of prediction performance.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21x/li21x.pdf", "supp": "", "pdf_size": 914123, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3997789156943606722&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Department of Statistics and Data Science, Southern University of Science and Technology, China; Huawei Noah\u2019s Ark Lab, Hong Kong; School of Data Science, Fudan University, China", "aff_domain": "sustech.edu.cn;huawei.com;fudan.edu.cn", "email": "sustech.edu.cn;huawei.com;fudan.edu.cn", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/li21x.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Southern University of Science and Technology;Huawei;Fudan University", "aff_unique_dep": "Department of Statistics and Data Science;Huawei Noah\u2019s Ark Lab;School of Data Science", "aff_unique_url": "https://www.sustech.edu.cn;https://www.huawei.com/en/ai/noahs-ark-lab;https://www.fudan.edu.cn", "aff_unique_abbr": "SUSTech;Huawei Noah\u2019s Ark Lab;Fudan", "aff_campus_unique_index": "1", "aff_campus_unique": ";Hong Kong SAR", "aff_country_unique_index": "0;0;0", "aff_country_unique": "China" }, { "title": "Asymptotics of Ridge Regression in Convolutional Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9899", "id": "9899", "proceeding": "http://proceedings.mlr.press/v139/sahraee-ardakan21a.html", "slides": "", "author_site": "Mojtaba Sahraee-Ardakan, Tung Mai, Anup Rao, Ryan A. Rossi, Sundeep Rangan, Alyson Fletcher", "author": "Mojtaba Sahraee-Ardakan; Tung Mai; Anup Rao; Ryan A. Rossi; Sundeep Rangan; Alyson K Fletcher", "abstract": "Understanding generalization and estimation error of estimators for simple models such as linear and generalized linear models has attracted a lot of attention recently. 
This is in part due to an interesting observation made in machine learning community that highly over-parameterized neural networks achieve zero training error, and yet they are able to generalize well over the test samples. This phenomenon is captured by the so called double descent curve, where the generalization error starts decreasing again after the interpolation threshold. A series of recent works tried to explain such phenomenon for simple models. In this work, we analyze the asymptotics of estimation error in ridge estimators for convolutional linear models. These convolutional inverse problems, also known as deconvolution, naturally arise in different fields such as seismology, imaging, and acoustics among others. Our results hold for a large class of input distributions that include i.i.d. features as a special case. We derive exact formulae for estimation error of ridge estimators that hold in a certain high-dimensional regime. We show the double descent phenomenon in our experiments for convolutional models and show that our theoretical results match the experiments.", "bibtex": "@InProceedings{pmlr-v139-sahraee-ardakan21a,\n title = \t {Asymptotics of Ridge Regression in Convolutional Models},\n author = {Sahraee-Ardakan, Mojtaba and Mai, Tung and Rao, Anup and Rossi, Ryan A. and Rangan, Sundeep and Fletcher, Alyson K},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9265--9275},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sahraee-ardakan21a/sahraee-ardakan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/sahraee-ardakan21a.html},\n abstract = \t {Understanding generalization and estimation error of estimators for simple models such as linear and generalized linear models has attracted a lot of attention recently. This is in part due to an interesting observation made in machine learning community that highly over-parameterized neural networks achieve zero training error, and yet they are able to generalize well over the test samples. This phenomenon is captured by the so called double descent curve, where the generalization error starts decreasing again after the interpolation threshold. A series of recent works tried to explain such phenomenon for simple models. In this work, we analyze the asymptotics of estimation error in ridge estimators for convolutional linear models. These convolutional inverse problems, also known as deconvolution, naturally arise in different fields such as seismology, imaging, and acoustics among others. Our results hold for a large class of input distributions that include i.i.d. features as a special case. We derive exact formulae for estimation error of ridge estimators that hold in a certain high-dimensional regime. 
We show the double descent phenomenon in our experiments for convolutional models and show that our theoretical results match the experiments.}\n}", "pdf": "http://proceedings.mlr.press/v139/sahraee-ardakan21a/sahraee-ardakan21a.pdf", "supp": "", "pdf_size": 995454, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2538443402902804542&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Electrical and Computer Engineering, University of California, Los Angeles + Department of Statistics, University of California, Los Angeles; Adobe Research; Adobe Research; Adobe Research; Department of Electrical and Computer Engineering, New York University; Department of Electrical and Computer Engineering, University of California, Los Angeles + Department of Statistics, University of California, Los Angeles", "aff_domain": "ucla.edu; ; ; ; ; ", "email": "ucla.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/sahraee-ardakan21a.html", "aff_unique_index": "0+0;1;1;1;2;0+0", "aff_unique_norm": "University of California, Los Angeles;Adobe;New York University", "aff_unique_dep": "Department of Electrical and Computer Engineering;Adobe Research;Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.ucla.edu;https://research.adobe.com;https://www.nyu.edu", "aff_unique_abbr": "UCLA;Adobe;NYU", "aff_campus_unique_index": "0+0;2;0+0", "aff_campus_unique": "Los Angeles;;New York", "aff_country_unique_index": "0+0;0;0;0;0;0+0", "aff_country_unique": "United States" }, { "title": "Asynchronous Decentralized Optimization With Implicit Stochastic Variance Reduction", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8957", "id": "8957", "proceeding": "http://proceedings.mlr.press/v139/niwa21a.html", "slides": "/media/icml-2021/Slides/8957.pdf", "author_site": "Kenta Niwa, Guoqiang Zhang, W. Bastiaan Kleijn, Noboru Harada, Hiroshi Sawada, Akinori Fujino", "author": "Kenta Niwa; Guoqiang Zhang; W. Bastiaan Kleijn; Noboru Harada; Hiroshi Sawada; Akinori Fujino", "abstract": "A novel asynchronous decentralized optimization method that follows Stochastic Variance Reduction (SVR) is proposed. Average consensus algorithms, such as Decentralized Stochastic Gradient Descent (DSGD), facilitate distributed training of machine learning models. However, the gradient will drift within the local nodes due to statistical heterogeneity of the subsets of data residing on the nodes and long communication intervals. To overcome the drift problem, (i) Gradient Tracking-SVR (GT-SVR) integrates SVR into DSGD and (ii) Edge-Consensus Learning (ECL) solves a model constrained minimization problem using a primal-dual formalism. In this paper, we reformulate the update procedure of ECL such that it implicitly includes the gradient modification of SVR by optimally selecting a constraint-strength control parameter. Through convergence analysis and experiments, we confirmed that the proposed ECL with Implicit SVR (ECL-ISVR) is stable and approximately reaches the reference performance obtained with computation on a single-node using full data set.", "bibtex": "@InProceedings{pmlr-v139-niwa21a,\n title = \t {Asynchronous Decentralized Optimization With Implicit Stochastic Variance Reduction},\n author = {Niwa, Kenta and Zhang, Guoqiang and Kleijn, W. 
Bastiaan and Harada, Noboru and Sawada, Hiroshi and Fujino, Akinori},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8195--8204},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/niwa21a/niwa21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/niwa21a.html},\n abstract = \t {A novel asynchronous decentralized optimization method that follows Stochastic Variance Reduction (SVR) is proposed. Average consensus algorithms, such as Decentralized Stochastic Gradient Descent (DSGD), facilitate distributed training of machine learning models. However, the gradient will drift within the local nodes due to statistical heterogeneity of the subsets of data residing on the nodes and long communication intervals. To overcome the drift problem, (i) Gradient Tracking-SVR (GT-SVR) integrates SVR into DSGD and (ii) Edge-Consensus Learning (ECL) solves a model constrained minimization problem using a primal-dual formalism. In this paper, we reformulate the update procedure of ECL such that it implicitly includes the gradient modification of SVR by optimally selecting a constraint-strength control parameter. Through convergence analysis and experiments, we confirmed that the proposed ECL with Implicit SVR (ECL-ISVR) is stable and approximately reaches the reference performance obtained with computation on a single-node using full data set.}\n}", "pdf": "http://proceedings.mlr.press/v139/niwa21a/niwa21a.pdf", "supp": "", "pdf_size": 1457366, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1559241087892244167&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "NTT Communication Science Laboratories, Kyoto, Japan+NTT Media Intelligence Laboratories, Tokyo, Japan; University of Technology Sydney, Sydney, Australia; Victoria University of Wellington, Wellington, New Zealand; NTT Communication Science Laboratories, Kyoto, Japan+NTT Media Intelligence Laboratories, Tokyo, Japan; NTT Communication Science Laboratories, Kyoto, Japan; NTT Communication Science Laboratories, Kyoto, Japan", "aff_domain": "hco.ntt.co.jp; ; ; ; ; ", "email": "hco.ntt.co.jp; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/niwa21a.html", "aff_unique_index": "0+1;2;3;0+1;0;0", "aff_unique_norm": "NTT Communication Science Laboratories;NTT Media Intelligence Laboratories;University of Technology Sydney;Victoria University of Wellington", "aff_unique_dep": ";;;", "aff_unique_url": "https://www.ntt-csl.com;https://www.ntt.co.jp;https://www.uts.edu.au;https://www.victoria.ac.nz", "aff_unique_abbr": ";;UTS;VUW", "aff_campus_unique_index": "0+1;2;3;0+1;0;0", "aff_campus_unique": "Kyoto;Tokyo;Sydney;Wellington", "aff_country_unique_index": "0+0;1;2;0+0;0;0", "aff_country_unique": "Japan;Australia;New Zealand" }, { "title": "Asynchronous Distributed Learning : Adapting to Gradient Delays without Prior Knowledge", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10171", "id": "10171", "proceeding": "http://proceedings.mlr.press/v139/aviv21a.html", "slides": "", "author_site": "Rotem Zamir Aviv, Ido Hakimi, Assaf Schuster, Kfir Levy", "author": "Rotem Zamir Aviv; Ido Hakimi; Assaf Schuster; Kfir Yehuda Levy", "abstract": "We consider stochastic convex optimization problems, 
where several machines act asynchronously in parallel while sharing a common memory. We propose a robust training method for the constrained setting and derive non asymptotic convergence guarantees that do not depend on prior knowledge of update delays, objective smoothness, and gradient variance. Conversely, existing methods for this setting crucially rely on this prior knowledge, which render them unsuitable for essentially all shared-resources computational environments, such as clouds and data centers. Concretely, existing approaches are unable to accommodate changes in the delays which result from dynamic allocation of the machines, while our method implicitly adapts to such changes.", "bibtex": "@InProceedings{pmlr-v139-aviv21a,\n title = \t {Asynchronous Distributed Learning : Adapting to Gradient Delays without Prior Knowledge},\n author = {Aviv, Rotem Zamir and Hakimi, Ido and Schuster, Assaf and Levy, Kfir Yehuda},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {436--445},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/aviv21a/aviv21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/aviv21a.html},\n abstract = \t {We consider stochastic convex optimization problems, where several machines act asynchronously in parallel while sharing a common memory. We propose a robust training method for the constrained setting and derive non asymptotic convergence guarantees that do not depend on prior knowledge of update delays, objective smoothness, and gradient variance. Conversely, existing methods for this setting crucially rely on this prior knowledge, which render them unsuitable for essentially all shared-resources computational environments, such as clouds and data centers. 
Concretely, existing approaches are unable to accommodate changes in the delays which result from dynamic allocation of the machines, while our method implicitly adapts to such changes.}\n}", "pdf": "http://proceedings.mlr.press/v139/aviv21a/aviv21a.pdf", "supp": "", "pdf_size": 1457186, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16207629602322165078&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 2, "aff": "Viterbi Faculty of Electrical and Computer Engineering, Technion, Haifa, Israel; Taub Faculty of Computer Science, Technion, Haifa, Israel; Taub Faculty of Computer Science, Technion, Haifa, Israel; Viterbi Faculty of Electrical and Computer Engineering, Technion, Haifa, Israel + A Viterbi Fellow", "aff_domain": "campus.technion.ac.il; ; ; ", "email": "campus.technion.ac.il; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/aviv21a.html", "aff_unique_index": "0;0;0;0+1", "aff_unique_norm": "Technion;University of Southern California", "aff_unique_dep": "Viterbi Faculty of Electrical and Computer Engineering;Viterbi School of Engineering", "aff_unique_url": "https://www.technion.ac.il;https://viterbi.usc.edu", "aff_unique_abbr": "Technion;USC", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Haifa;", "aff_country_unique_index": "0;0;0;0+1", "aff_country_unique": "Israel;United States" }, { "title": "Attention is not all you need: pure attention loses rank doubly exponentially with depth", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9821", "id": "9821", "proceeding": "http://proceedings.mlr.press/v139/dong21a.html", "slides": "", "author_site": "Yihe Dong, Jean-Baptiste Cordonnier, Andreas Loukas", "author": "Yihe Dong; Jean-Baptiste Cordonnier; Andreas Loukas", "abstract": "Attention-based architectures have become ubiquitous in machine learning. Yet, our understanding of the reasons for their effectiveness remains limited.
This work proposes a new way to understand self-attention networks: we show that their output can be decomposed into a sum of smaller terms\u2014or paths\u2014each involving the operation of a sequence of attention heads across layers. Using this path decomposition, we prove that self-attention possesses a strong inductive bias towards \"token uniformity\". Specifically, without skip connections or multi-layer perceptrons (MLPs), the output converges doubly exponentially to a rank-1 matrix. On the other hand, skip connections and MLPs stop the output from degeneration. Our experiments verify the convergence results on standard transformer architectures.}\n}", "pdf": "http://proceedings.mlr.press/v139/dong21a/dong21a.pdf", "supp": "", "pdf_size": 1263433, "gs_citation": 475, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6882435683900456661&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Google; EPFL; EPFL", "aff_domain": "google.com;epfl.ch;epfl.ch", "email": "google.com;epfl.ch;epfl.ch", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/dong21a.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "Google;EPFL", "aff_unique_dep": "Google;", "aff_unique_url": "https://www.google.com;https://www.epfl.ch", "aff_unique_abbr": "Google;EPFL", "aff_campus_unique_index": "0", "aff_campus_unique": "Mountain View;", "aff_country_unique_index": "0;1;1", "aff_country_unique": "United States;Switzerland" }, { "title": "Augmented World Models Facilitate Zero-Shot Dynamics Generalization From a Single Offline Environment", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10307", "id": "10307", "proceeding": "http://proceedings.mlr.press/v139/ball21a.html", "slides": "/media/icml-2021/Slides/10307.pdf", "author_site": "Philip Ball, Cong Lu, Jack Parker-Holder, Stephen Roberts", "author": "Philip J Ball; Cong Lu; Jack Parker-Holder; Stephen Roberts", "abstract": "Reinforcement learning from large-scale offline datasets provides us with the ability to learn policies without potentially unsafe or impractical exploration. Significant progress has been made in the past few years in dealing with the challenge of correcting for differing behavior between the data collection and learned policies. However, little attention has been paid to potentially changing dynamics when transferring a policy to the online setting, where performance can be up to 90% reduced for existing methods. In this paper we address this problem with Augmented World Models (AugWM). We augment a learned dynamics model with simple transformations that seek to capture potential changes in physical properties of the robot, leading to more robust policies. We not only train our policy in this new setting, but also provide it with the sampled augmentation as a context, allowing it to adapt to changes in the environment. At test time we learn the context in a self-supervised fashion by approximating the augmentation which corresponds to the new environment. 
We rigorously evaluate our approach on over 100 different changed dynamics settings, and show that this simple approach can significantly improve the zero-shot generalization of a recent state-of-the-art baseline, often achieving successful policies where the baseline fails.", "bibtex": "@InProceedings{pmlr-v139-ball21a,\n title = \t {Augmented World Models Facilitate Zero-Shot Dynamics Generalization From a Single Offline Environment},\n author = {Ball, Philip J and Lu, Cong and Parker-Holder, Jack and Roberts, Stephen},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {619--629},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ball21a/ball21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ball21a.html},\n abstract = \t {Reinforcement learning from large-scale offline datasets provides us with the ability to learn policies without potentially unsafe or impractical exploration. Significant progress has been made in the past few years in dealing with the challenge of correcting for differing behavior between the data collection and learned policies. However, little attention has been paid to potentially changing dynamics when transferring a policy to the online setting, where performance can be up to 90% reduced for existing methods. In this paper we address this problem with Augmented World Models (AugWM). We augment a learned dynamics model with simple transformations that seek to capture potential changes in physical properties of the robot, leading to more robust policies. We not only train our policy in this new setting, but also provide it with the sampled augmentation as a context, allowing it to adapt to changes in the environment. At test time we learn the context in a self-supervised fashion by approximating the augmentation which corresponds to the new environment. 
We rigorously evaluate our approach on over 100 different changed dynamics settings, and show that this simple approach can significantly improve the zero-shot generalization of a recent state-of-the-art baseline, often achieving successful policies where the baseline fails.}\n}", "pdf": "http://proceedings.mlr.press/v139/ball21a/ball21a.pdf", "supp": "", "pdf_size": 1694645, "gs_citation": 56, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14271738671844853753&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "University of Oxford; University of Oxford; University of Oxford; University of Oxford", "aff_domain": "robots.ox.ac.uk;stats.ox.ac.uk; ; ", "email": "robots.ox.ac.uk;stats.ox.ac.uk; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/ball21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Oxford", "aff_unique_dep": "", "aff_unique_url": "https://www.ox.ac.uk", "aff_unique_abbr": "Oxford", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Auto-NBA: Efficient and Effective Search Over the Joint Space of Networks, Bitwidths, and Accelerators", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9949", "id": "9949", "proceeding": "http://proceedings.mlr.press/v139/fu21d.html", "slides": "", "author_site": "Yonggan Fu, Yongan Zhang, Yang Zhang, David Cox, Yingyan Lin", "author": "Yonggan Fu; Yongan Zhang; Yang Zhang; David Cox; Yingyan Lin", "abstract": "While maximizing deep neural networks\u2019 (DNNs\u2019) acceleration efficiency requires a joint search/design of three different yet highly coupled aspects, including the networks, bitwidths, and accelerators, the challenges associated with such a joint search have not yet been fully understood and addressed. The key challenges include (1) the dilemma of whether to explode the memory consumption due to the huge joint space or achieve sub-optimal designs, (2) the discrete nature of the accelerator design space that is coupled yet different from that of the networks and bitwidths, and (3) the chicken and egg problem associated with network-accelerator co-search, i.e., co-search requires operation-wise hardware cost, which is lacking during search as the optimal accelerator depending on the whole network is still unknown during search. To tackle these daunting challenges towards optimal and fast development of DNN accelerators, we propose a framework dubbed Auto-NBA to enable jointly searching for the Networks, Bitwidths, and Accelerators, by efficiently localizing the optimal design within the huge joint design space for each target dataset and acceleration specification. Our Auto-NBA integrates a heterogeneous sampling strategy to achieve unbiased search with constant memory consumption, and a novel joint-search pipeline equipped with a generic differentiable accelerator search engine. Extensive experiments and ablation studies validate that both Auto-NBA generated networks and accelerators consistently outperform state-of-the-art designs (including co-search/exploration techniques, hardware-aware NAS methods, and DNN accelerators), in terms of search time, task accuracy, and accelerator efficiency. 
Our codes are available at: https://github.com/RICE-EIC/Auto-NBA.", "bibtex": "@InProceedings{pmlr-v139-fu21d,\n title = \t {Auto-NBA: Efficient and Effective Search Over the Joint Space of Networks, Bitwidths, and Accelerators},\n author = {Fu, Yonggan and Zhang, Yongan and Zhang, Yang and Cox, David and Lin, Yingyan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3505--3517},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fu21d/fu21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/fu21d.html},\n abstract = \t {While maximizing deep neural networks\u2019 (DNNs\u2019) acceleration efficiency requires a joint search/design of three different yet highly coupled aspects, including the networks, bitwidths, and accelerators, the challenges associated with such a joint search have not yet been fully understood and addressed. The key challenges include (1) the dilemma of whether to explode the memory consumption due to the huge joint space or achieve sub-optimal designs, (2) the discrete nature of the accelerator design space that is coupled yet different from that of the networks and bitwidths, and (3) the chicken and egg problem associated with network-accelerator co-search, i.e., co-search requires operation-wise hardware cost, which is lacking during search as the optimal accelerator depending on the whole network is still unknown during search. To tackle these daunting challenges towards optimal and fast development of DNN accelerators, we propose a framework dubbed Auto-NBA to enable jointly searching for the Networks, Bitwidths, and Accelerators, by efficiently localizing the optimal design within the huge joint design space for each target dataset and acceleration specification. Our Auto-NBA integrates a heterogeneous sampling strategy to achieve unbiased search with constant memory consumption, and a novel joint-search pipeline equipped with a generic differentiable accelerator search engine. Extensive experiments and ablation studies validate that both Auto-NBA generated networks and accelerators consistently outperform state-of-the-art designs (including co-search/exploration techniques, hardware-aware NAS methods, and DNN accelerators), in terms of search time, task accuracy, and accelerator efficiency. 
Our codes are available at: https://github.com/RICE-EIC/Auto-NBA.}\n}", "pdf": "http://proceedings.mlr.press/v139/fu21d/fu21d.pdf", "supp": "", "pdf_size": 2028333, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=860563000728112413&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Electrical and Computer Engineering, Rice University; Department of Electrical and Computer Engineering, Rice University; MIT-IBM Watson AI Lab; MIT-IBM Watson AI Lab; Department of Electrical and Computer Engineering, Rice University", "aff_domain": "rice.edu; ; ; ;rice.edu", "email": "rice.edu; ; ; ;rice.edu", "github": "https://github.com/RICE-EIC/Auto-NBA", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/fu21d.html", "aff_unique_index": "0;0;1;1;0", "aff_unique_norm": "Rice University;Massachusetts Institute of Technology", "aff_unique_dep": "Department of Electrical and Computer Engineering;IBM Watson AI Lab", "aff_unique_url": "https://www.rice.edu;https://www.mitibmwatsonailab.org", "aff_unique_abbr": "Rice;MIT-IBM AI Lab", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "AutoAttend: Automated Attention Representation Search", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8657", "id": "8657", "proceeding": "http://proceedings.mlr.press/v139/guan21a.html", "slides": "", "author_site": "Chaoyu Guan, Xin Wang, Wenwu Zhu", "author": "Chaoyu Guan; Xin Wang; Wenwu Zhu", "abstract": "Self-attention mechanisms have been widely adopted in many machine learning areas, including Natural Language Processing (NLP) and Graph Representation Learning (GRL), etc. However, existing works heavily rely on hand-crafted design to obtain customized attention mechanisms. In this paper, we automate Key, Query and Value representation design, which is one of the most important steps to obtain effective self-attentions. We propose an automated self-attention representation model, AutoAttend, which can automatically search powerful attention representations for downstream tasks leveraging Neural Architecture Search (NAS). In particular, we design a tailored search space for attention representation automation, which is flexible to produce effective attention representation designs. Based on the design prior obtained from attention representations in previous works, we further regularize our search space to reduce the space complexity without the loss of expressivity. Moreover, we propose a novel context-aware parameter sharing mechanism considering special characteristics of each sub-architecture to provide more accurate architecture estimations when conducting parameter sharing in our tailored search space. 
Experiments show the superiority of our proposed AutoAttend model over previous state-of-the-arts on eight text classification tasks in NLP and four node classification tasks in GRL.", "bibtex": "@InProceedings{pmlr-v139-guan21a,\n title = \t {AutoAttend: Automated Attention Representation Search},\n author = {Guan, Chaoyu and Wang, Xin and Zhu, Wenwu},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3864--3874},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/guan21a/guan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/guan21a.html},\n abstract = \t {Self-attention mechanisms have been widely adopted in many machine learning areas, including Natural Language Processing (NLP) and Graph Representation Learning (GRL), etc. However, existing works heavily rely on hand-crafted design to obtain customized attention mechanisms. In this paper, we automate Key, Query and Value representation design, which is one of the most important steps to obtain effective self-attentions. We propose an automated self-attention representation model, AutoAttend, which can automatically search powerful attention representations for downstream tasks leveraging Neural Architecture Search (NAS). In particular, we design a tailored search space for attention representation automation, which is flexible to produce effective attention representation designs. Based on the design prior obtained from attention representations in previous works, we further regularize our search space to reduce the space complexity without the loss of expressivity. Moreover, we propose a novel context-aware parameter sharing mechanism considering special characteristics of each sub-architecture to provide more accurate architecture estimations when conducting parameter sharing in our tailored search space. 
Experiments show the superiority of our proposed AutoAttend model over previous state-of-the-arts on eight text classification tasks in NLP and four node classification tasks in GRL.}\n}", "pdf": "http://proceedings.mlr.press/v139/guan21a/guan21a.pdf", "supp": "", "pdf_size": 363115, "gs_citation": 46, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12681719189192014106&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science and Technology, Tsinghua University; Department of Computer Science and Technology, Tsinghua University; Department of Computer Science and Technology, Tsinghua University", "aff_domain": "tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn", "email": "tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/guan21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Tsinghua University", "aff_unique_dep": "Department of Computer Science and Technology", "aff_unique_url": "https://www.tsinghua.edu.cn", "aff_unique_abbr": "THU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "China" }, { "title": "AutoSampling: Search for Effective Data Sampling Schedules", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8661", "id": "8661", "proceeding": "http://proceedings.mlr.press/v139/sun21a.html", "slides": "", "author_site": "MING SUN, Haoxuan Dou, Baopu Li, Junjie Yan, Wanli Ouyang, Lei Cui", "author": "Ming Sun; Haoxuan Dou; Baopu Li; Junjie Yan; Wanli Ouyang; Lei Cui", "abstract": "Data sampling acts as a pivotal role in training deep learning models. However, an effective sampling schedule is difficult to learn due to its inherent high-dimension as a hyper-parameter. In this paper, we propose an AutoSampling method to automatically learn sampling schedules for model training, which consists of the multi-exploitation step aiming for optimal local sampling schedules and the exploration step for the ideal sampling distribution. More specifically, we achieve sampling schedule search with shortened exploitation cycle to provide enough supervision. In addition, we periodically estimate the sampling distribution from the learned sampling schedules and perturb it to search in the distribution space. The combination of two searches allows us to learn a robust sampling schedule. We apply our AutoSampling method to a variety of image classification tasks illustrating the effectiveness of the proposed method.", "bibtex": "@InProceedings{pmlr-v139-sun21a,\n title = \t {AutoSampling: Search for Effective Data Sampling Schedules},\n author = {Sun, Ming and Dou, Haoxuan and Li, Baopu and Yan, Junjie and Ouyang, Wanli and Cui, Lei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9923--9933},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sun21a/sun21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/sun21a.html},\n abstract = \t {Data sampling acts as a pivotal role in training deep learning models. However, an effective sampling schedule is difficult to learn due to its inherent high-dimension as a hyper-parameter. 
In this paper, we propose an AutoSampling method to automatically learn sampling schedules for model training, which consists of the multi-exploitation step aiming for optimal local sampling schedules and the exploration step for the ideal sampling distribution. More specifically, we achieve sampling schedule search with shortened exploitation cycle to provide enough supervision. In addition, we periodically estimate the sampling distribution from the learned sampling schedules and perturb it to search in the distribution space. The combination of two searches allows us to learn a robust sampling schedule. We apply our AutoSampling method to a variety of image classification tasks illustrating the effectiveness of the proposed method.}\n}", "pdf": "http://proceedings.mlr.press/v139/sun21a/sun21a.pdf", "supp": "", "pdf_size": 562308, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3746069801375272827&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "SenseTime Research; SenseTime Research; Baidu USA LLC; SenseTime Research; SenseTime Research; The University of Sydney", "aff_domain": "sensetime.com;sensetime.com;baidu.com;sensetime.com;sensetime.com;sydney.edu.au", "email": "sensetime.com;sensetime.com;baidu.com;sensetime.com;sensetime.com;sydney.edu.au", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/sun21a.html", "aff_unique_index": "0;0;1;0;0;2", "aff_unique_norm": "SenseTime;Baidu;University of Sydney", "aff_unique_dep": "SenseTime Research;Baidu;", "aff_unique_url": "https://www.sensetime.com;https://www.baidu.com;https://www.sydney.edu.au", "aff_unique_abbr": "SenseTime;Baidu;USYD", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;1;0;0;2", "aff_country_unique": "China;United States;Australia" }, { "title": "Autoencoder Image Interpolation by Shaping the Latent Space", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8769", "id": "8769", "proceeding": "http://proceedings.mlr.press/v139/oring21a.html", "slides": "", "author_site": "Alon Oring, Zohar Yakhini, Yacov Hel-Or", "author": "Alon Oring; Zohar Yakhini; Yacov Hel-Or", "abstract": "One of the fascinating properties of deep learning is the ability of the network to reveal the underlying factors characterizing elements in datasets of different types. Autoencoders represent an effective approach for computing these factors. Autoencoders have been studied in the context of enabling interpolation between data points by decoding convex combinations of latent vectors. However, this interpolation often leads to artifacts or produces unrealistic results during reconstruction. We argue that these incongruities are due to the structure of the latent space and to the fact that such naively interpolated latent vectors deviate from the data manifold. In this paper, we propose a regularization technique that shapes the latent representation to follow a manifold that is consistent with the training images and that forces the manifold to be smooth and locally convex. 
This regularization not only enables faithful interpolation between data points, as we show herein but can also be used as a general regularization technique to avoid overfitting or to produce new samples for data augmentation.", "bibtex": "@InProceedings{pmlr-v139-oring21a,\n title = \t {Autoencoder Image Interpolation by Shaping the Latent Space},\n author = {Oring, Alon and Yakhini, Zohar and Hel-Or, Yacov},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8281--8290},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/oring21a/oring21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/oring21a.html},\n abstract = \t {One of the fascinating properties of deep learning is the ability of the network to reveal the underlying factors characterizing elements in datasets of different types. Autoencoders represent an effective approach for computing these factors. Autoencoders have been studied in the context of enabling interpolation between data points by decoding convex combinations of latent vectors. However, this interpolation often leads to artifacts or produces unrealistic results during reconstruction. We argue that these incongruities are due to the structure of the latent space and to the fact that such naively interpolated latent vectors deviate from the data manifold. In this paper, we propose a regularization technique that shapes the latent representation to follow a manifold that is consistent with the training images and that forces the manifold to be smooth and locally convex. This regularization not only enables faithful interpolation between data points, as we show herein but can also be used as a general regularization technique to avoid overfitting or to produce new samples for data augmentation.}\n}", "pdf": "http://proceedings.mlr.press/v139/oring21a/oring21a.pdf", "supp": "", "pdf_size": 8099181, "gs_citation": -1, "gs_cited_by_link": "", "gs_version_total": -1, "aff": "School of Computer Science, The Interdisciplinary Center, Herzliya, Israel; School of Computer Science, The Interdisciplinary Center, Herzliya, Israel; School of Computer Science, The Interdisciplinary Center, Herzliya, Israel", "aff_domain": "idc.ac.il;idc.ac.il;idc.ac.il", "email": "idc.ac.il;idc.ac.il;idc.ac.il", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/oring21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Interdisciplinary Center", "aff_unique_dep": "School of Computer Science", "aff_unique_url": "https://www.idc.ac.il", "aff_unique_abbr": "IDC", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Herzliya", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Israel" }, { "title": "Autoencoding Under Normalization Constraints", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9093", "id": "9093", "proceeding": "http://proceedings.mlr.press/v139/yoon21c.html", "slides": "", "author_site": "Sangwoong Yoon, Yung-Kyun Noh, Frank Chongwoo Park", "author": "Sangwoong Yoon; Yung-Kyun Noh; Frank Park", "abstract": "Likelihood is a standard estimate for outlier detection. The specific role of the normalization constraint is to ensure that the out-of-distribution (OOD) regime has a small likelihood when samples are learned using maximum likelihood. 
Because autoencoders do not possess such a process of normalization, they often fail to recognize outliers even when they are obviously OOD. We propose the Normalized Autoencoder (NAE), a normalized probabilistic model constructed from an autoencoder. The probability density of NAE is defined using the reconstruction error of an autoencoder, which is differently defined in the conventional energy-based model. In our model, normalization is enforced by suppressing the reconstruction of negative samples, significantly improving the outlier detection performance. Our experimental results confirm the efficacy of NAE, both in detecting outliers and in generating in-distribution samples.", "bibtex": "@InProceedings{pmlr-v139-yoon21c,\n title = \t {Autoencoding Under Normalization Constraints},\n author = {Yoon, Sangwoong and Noh, Yung-Kyun and Park, Frank},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12087--12097},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yoon21c/yoon21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/yoon21c.html},\n abstract = \t {Likelihood is a standard estimate for outlier detection. The specific role of the normalization constraint is to ensure that the out-of-distribution (OOD) regime has a small likelihood when samples are learned using maximum likelihood. Because autoencoders do not possess such a process of normalization, they often fail to recognize outliers even when they are obviously OOD. We propose the Normalized Autoencoder (NAE), a normalized probabilistic model constructed from an autoencoder. The probability density of NAE is defined using the reconstruction error of an autoencoder, which is differently defined in the conventional energy-based model. In our model, normalization is enforced by suppressing the reconstruction of negative samples, significantly improving the outlier detection performance. Our experimental results confirm the efficacy of NAE, both in detecting outliers and in generating in-distribution samples.}\n}", "pdf": "http://proceedings.mlr.press/v139/yoon21c/yoon21c.pdf", "supp": "", "pdf_size": 1500589, "gs_citation": 55, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1297005004772257313&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/yoon21c.html" }, { "title": "Automatic variational inference with cascading flows", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9519", "id": "9519", "proceeding": "http://proceedings.mlr.press/v139/ambrogioni21a.html", "slides": "/media/icml-2021/Slides/9519.pdf", "author_site": "Luca Ambrogioni, Gianluigi Silvestri, Marcel van Gerven", "author": "Luca Ambrogioni; Gianluigi Silvestri; Marcel van Gerven", "abstract": "The automation of probabilistic reasoning is one of the primary aims of machine learning. Recently, the confluence of variational inference and deep learning has led to powerful and flexible automatic inference methods that can be trained by stochastic gradient descent. In particular, normalizing flows are highly parameterized deep models that can fit arbitrarily complex posterior densities. 
However, normalizing flows struggle in highly structured probabilistic programs as they need to relearn the forward-pass of the program. Automatic structured variational inference (ASVI) remedies this problem by constructing variational programs that embed the forward-pass. Here, we combine the flexibility of normalizing flows and the prior-embedding property of ASVI in a new family of variational programs, which we named cascading flows. A cascading flows program interposes a newly designed highway flow architecture in between the conditional distributions of the prior program such as to steer it toward the observed data. These programs can be constructed automatically from an input probabilistic program and can also be amortized automatically. We evaluate the performance of the new variational programs in a series of structured inference problems. We find that cascading flows have much higher performance than both normalizing flows and ASVI in a large set of structured inference problems.", "bibtex": "@InProceedings{pmlr-v139-ambrogioni21a,\n title = \t {Automatic variational inference with cascading flows},\n author = {Ambrogioni, Luca and Silvestri, Gianluigi and van Gerven, Marcel},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {254--263},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ambrogioni21a/ambrogioni21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ambrogioni21a.html},\n abstract = \t {The automation of probabilistic reasoning is one of the primary aims of machine learning. Recently, the confluence of variational inference and deep learning has led to powerful and flexible automatic inference methods that can be trained by stochastic gradient descent. In particular, normalizing flows are highly parameterized deep models that can fit arbitrarily complex posterior densities. However, normalizing flows struggle in highly structured probabilistic programs as they need to relearn the forward-pass of the program. Automatic structured variational inference (ASVI) remedies this problem by constructing variational programs that embed the forward-pass. Here, we combine the flexibility of normalizing flows and the prior-embedding property of ASVI in a new family of variational programs, which we named cascading flows. A cascading flows program interposes a newly designed highway flow architecture in between the conditional distributions of the prior program such as to steer it toward the observed data. These programs can be constructed automatically from an input probabilistic program and can also be amortized automatically. We evaluate the performance of the new variational programs in a series of structured inference problems. 
We find that cascading flows have much higher performance than both normalizing flows and ASVI in a large set of structured inference problems.}\n}", "pdf": "http://proceedings.mlr.press/v139/ambrogioni21a/ambrogioni21a.pdf", "supp": "", "pdf_size": 802043, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16991650093300670925&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Donders Centre for Cognition, Radboud University, Netherlands+OnePlanet Research Center, imec-the Netherlands, Wageningen, Netherlands; Donders Centre for Cognition, Radboud University, Netherlands; Donders Centre for Cognition, Radboud University, Netherlands", "aff_domain": "donders.ru.nl; ; ", "email": "donders.ru.nl; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/ambrogioni21a.html", "aff_unique_index": "0+1;0;0", "aff_unique_norm": "Radboud University;OnePlanet Research Center", "aff_unique_dep": "Donders Centre for Cognition;", "aff_unique_url": "https://www.ru.nl;", "aff_unique_abbr": "RU;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "Netherlands" }, { "title": "Autoregressive Denoising Diffusion Models for Multivariate Probabilistic Time Series Forecasting", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8591", "id": "8591", "proceeding": "http://proceedings.mlr.press/v139/rasul21a.html", "slides": "/media/icml-2021/Slides/8591.pdf", "author_site": "Kashif Rasul, Calvin Seward, Ingmar Schuster, Roland Vollgraf", "author": "Kashif Rasul; Calvin Seward; Ingmar Schuster; Roland Vollgraf", "abstract": "In this work, we propose TimeGrad, an autoregressive model for multivariate probabilistic time series forecasting which samples from the data distribution at each time step by estimating its gradient. To this end, we use diffusion probabilistic models, a class of latent variable models closely connected to score matching and energy-based methods. Our model learns gradients by optimizing a variational bound on the data likelihood and at inference time converts white noise into a sample of the distribution of interest through a Markov chain using Langevin sampling. We demonstrate experimentally that the proposed autoregressive denoising diffusion model is the new state-of-the-art multivariate probabilistic forecasting method on real-world data sets with thousands of correlated dimensions. We hope that this method is a useful tool for practitioners and lays the foundation for future research in this area.", "bibtex": "@InProceedings{pmlr-v139-rasul21a,\n title = \t {Autoregressive Denoising Diffusion Models for Multivariate Probabilistic Time Series Forecasting},\n author = {Rasul, Kashif and Seward, Calvin and Schuster, Ingmar and Vollgraf, Roland},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8857--8868},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/rasul21a/rasul21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/rasul21a.html},\n abstract = \t {In this work, we propose TimeGrad, an autoregressive model for multivariate probabilistic time series forecasting which samples from the data distribution at each time step by estimating its gradient. 
To this end, we use diffusion probabilistic models, a class of latent variable models closely connected to score matching and energy-based methods. Our model learns gradients by optimizing a variational bound on the data likelihood and at inference time converts white noise into a sample of the distribution of interest through a Markov chain using Langevin sampling. We demonstrate experimentally that the proposed autoregressive denoising diffusion model is the new state-of-the-art multivariate probabilistic forecasting method on real-world data sets with thousands of correlated dimensions. We hope that this method is a useful tool for practitioners and lays the foundation for future research in this area.}\n}", "pdf": "http://proceedings.mlr.press/v139/rasul21a/rasul21a.pdf", "supp": "", "pdf_size": 5751832, "gs_citation": 439, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11453532699552258037&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Zalando Research; Zalando Research; Zalando Research; Zalando Research", "aff_domain": "zalando.de; ; ; ", "email": "zalando.de; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/rasul21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Zalando SE", "aff_unique_dep": "Zalando Research", "aff_unique_url": "https://www.zalando.de", "aff_unique_abbr": "Zalando", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Germany" }, { "title": "Average-Reward Off-Policy Policy Evaluation with Function Approximation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8653", "id": "8653", "proceeding": "http://proceedings.mlr.press/v139/zhang21u.html", "slides": "", "author_site": "Shangtong Zhang, Yi Wan, Richard Sutton, Shimon Whiteson", "author": "Shangtong Zhang; Yi Wan; Richard S Sutton; Shimon Whiteson", "abstract": "We consider off-policy policy evaluation with function approximation (FA) in average-reward MDPs, where the goal is to estimate both the reward rate and the differential value function. For this problem, bootstrapping is necessary and, along with off-policy learning and FA, results in the deadly triad (Sutton & Barto, 2018). To address the deadly triad, we propose two novel algorithms, reproducing the celebrated success of Gradient TD algorithms in the average-reward setting. In terms of estimating the differential value function, the algorithms are the first convergent off-policy linear function approximation algorithms. In terms of estimating the reward rate, the algorithms are the first convergent off-policy linear function approximation algorithms that do not require estimating the density ratio. 
We demonstrate empirically the advantage of the proposed algorithms, as well as their nonlinear variants, over a competitive density-ratio-based approach, in a simple domain as well as challenging robot simulation tasks.", "bibtex": "@InProceedings{pmlr-v139-zhang21u,\n title = \t {Average-Reward Off-Policy Policy Evaluation with Function Approximation},\n author = {Zhang, Shangtong and Wan, Yi and Sutton, Richard S and Whiteson, Shimon},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12578--12588},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21u/zhang21u.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21u.html},\n abstract = \t {We consider off-policy policy evaluation with function approximation (FA) in average-reward MDPs, where the goal is to estimate both the reward rate and the differential value function. For this problem, bootstrapping is necessary and, along with off-policy learning and FA, results in the deadly triad (Sutton & Barto, 2018). To address the deadly triad, we propose two novel algorithms, reproducing the celebrated success of Gradient TD algorithms in the average-reward setting. In terms of estimating the differential value function, the algorithms are the first convergent off-policy linear function approximation algorithms. In terms of estimating the reward rate, the algorithms are the first convergent off-policy linear function approximation algorithms that do not require estimating the density ratio. We demonstrate empirically the advantage of the proposed algorithms, as well as their nonlinear variants, over a competitive density-ratio-based approach, in a simple domain as well as challenging robot simulation tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21u/zhang21u.pdf", "supp": "", "pdf_size": 903957, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12042728594024517731&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "University of Oxford; University of Alberta; University of Alberta; University of Oxford", "aff_domain": "cs.ox.ac.uk;ualberta.ca; ; ", "email": "cs.ox.ac.uk;ualberta.ca; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/zhang21u.html", "aff_unique_index": "0;1;1;0", "aff_unique_norm": "University of Oxford;University of Alberta", "aff_unique_dep": ";", "aff_unique_url": "https://www.ox.ac.uk;https://www.ualberta.ca", "aff_unique_abbr": "Oxford;UAlberta", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;1;0", "aff_country_unique": "United Kingdom;Canada" }, { "title": "BANG: Bridging Autoregressive and Non-autoregressive Generation with Large Scale Pretraining", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8523", "id": "8523", "proceeding": "http://proceedings.mlr.press/v139/qi21a.html", "slides": "", "author_site": "Weizhen Qi, Yeyun Gong, Jian Jiao, Yu Yan, Weizhu Chen, Dayiheng Liu, Kewen Tang, Houqiang Li, Jiusheng Chen, Ruofei Zhang, Ming Zhou, Nan Duan", "author": "Weizhen Qi; Yeyun Gong; Jian Jiao; Yu Yan; Weizhu Chen; Dayiheng Liu; Kewen Tang; Houqiang Li; Jiusheng Chen; Ruofei Zhang; Ming Zhou; Nan Duan", "abstract": "In this paper, we propose BANG, a new pretraining model to Bridge the gap 
between Autoregressive (AR) and Non-autoregressive (NAR) Generation. AR and NAR generation can be uniformly regarded as to what extent previous tokens can be attended, and BANG bridges AR and NAR generation through designing a novel model structure for large-scale pre-training. A pretrained BANG model can simultaneously support AR, NAR, and semi-NAR generation to meet different requirements. Experiments on question generation (SQuAD 1.1), summarization (XSum), and dialogue generation (PersonaChat) show that BANG improves NAR and semi-NAR performance significantly as well as attaining comparable performance with strong AR pretrained models. Compared with the semi-NAR strong baselines, BANG achieves absolute improvements of 14.01 and 5.24 in the overall scores of SQuAD 1.1 and XSum, respectively. In addition, BANG achieves absolute improvements of 10.73, 6.39, and 5.90 in the overall scores of SQuAD, XSUM, and PersonaChat compared with the NAR strong baselines, respectively. Our code will be made publicly available.", "bibtex": "@InProceedings{pmlr-v139-qi21a,\n title = \t {BANG: Bridging Autoregressive and Non-autoregressive Generation with Large Scale Pretraining},\n author = {Qi, Weizhen and Gong, Yeyun and Jiao, Jian and Yan, Yu and Chen, Weizhu and Liu, Dayiheng and Tang, Kewen and Li, Houqiang and Chen, Jiusheng and Zhang, Ruofei and Zhou, Ming and Duan, Nan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8630--8639},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/qi21a/qi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/qi21a.html},\n abstract = \t {In this paper, we propose BANG, a new pretraining model to Bridge the gap between Autoregressive (AR) and Non-autoregressive (NAR) Generation. AR and NAR generation can be uniformly regarded as to what extent previous tokens can be attended, and BANG bridges AR and NAR generation through designing a novel model structure for large-scale pre-training. A pretrained BANG model can simultaneously support AR, NAR, and semi-NAR generation to meet different requirements. Experiments on question generation (SQuAD 1.1), summarization (XSum), and dialogue generation (PersonaChat) show that BANG improves NAR and semi-NAR performance significantly as well as attaining comparable performance with strong AR pretrained models. Compared with the semi-NAR strong baselines, BANG achieves absolute improvements of 14.01 and 5.24 in the overall scores of SQuAD 1.1 and XSum, respectively. In addition, BANG achieves absolute improvements of 10.73, 6.39, and 5.90 in the overall scores of SQuAD, XSUM, and PersonaChat compared with the NAR strong baselines, respectively. 
Our code will be made publicly available.}\n}", "pdf": "http://proceedings.mlr.press/v139/qi21a/qi21a.pdf", "supp": "", "pdf_size": 442158, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11081015911174909312&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": ";;;;;;;;;;;", "aff_domain": ";;;;;;;;;;;", "email": ";;;;;;;;;;;", "github": "https://github.com/microsoft/BANG", "project": "", "author_num": 12, "oa": "https://proceedings.mlr.press/v139/qi21a.html" }, { "title": "BASE Layers: Simplifying Training of Large, Sparse Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9213", "id": "9213", "proceeding": "http://proceedings.mlr.press/v139/lewis21a.html", "slides": "", "author_site": "Mike Lewis, Shruti Bhosale, Tim Dettmers, Naman Goyal, Luke Zettlemoyer", "author": "Mike Lewis; Shruti Bhosale; Tim Dettmers; Naman Goyal; Luke Zettlemoyer", "abstract": "We introduce a new balanced assignment of experts (BASE) layer for large language models that greatly simplifies existing high capacity sparse layers. Sparse layers can dramatically improve the efficiency of training and inference by routing each token to specialized expert modules that contain only a small fraction of the model parameters. However, it can be difficult to learn balanced routing functions that make full use of the available experts; existing approaches typically use routing heuristics or auxiliary expert-balancing loss functions. In contrast, we formulate token-to-expert allocation as a linear assignment problem, allowing an optimal assignment in which each expert receives an equal number of tokens. This optimal assignment scheme improves efficiency by guaranteeing balanced compute loads, and also simplifies training by not requiring any new hyperparameters or auxiliary losses. Code is publicly released.", "bibtex": "@InProceedings{pmlr-v139-lewis21a,\n title = \t {BASE Layers: Simplifying Training of Large, Sparse Models},\n author = {Lewis, Mike and Bhosale, Shruti and Dettmers, Tim and Goyal, Naman and Zettlemoyer, Luke},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6265--6274},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lewis21a/lewis21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lewis21a.html},\n abstract = \t {We introduce a new balanced assignment of experts (BASE) layer for large language models that greatly simplifies existing high capacity sparse layers. Sparse layers can dramatically improve the efficiency of training and inference by routing each token to specialized expert modules that contain only a small fraction of the model parameters. However, it can be difficult to learn balanced routing functions that make full use of the available experts; existing approaches typically use routing heuristics or auxiliary expert-balancing loss functions. In contrast, we formulate token-to-expert allocation as a linear assignment problem, allowing an optimal assignment in which each expert receives an equal number of tokens. This optimal assignment scheme improves efficiency by guaranteeing balanced compute loads, and also simplifies training by not requiring any new hyperparameters or auxiliary losses. 
Code is publicly released.}\n}", "pdf": "http://proceedings.mlr.press/v139/lewis21a/lewis21a.pdf", "supp": "", "pdf_size": 204721, "gs_citation": 293, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10892687538376450252&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Facebook AI Research; Facebook AI Research; Facebook AI Research + University of Washington; Facebook AI Research; Facebook AI Research + University of Washington", "aff_domain": "fb.com; ; ; ; ", "email": "fb.com; ; ; ; ", "github": "https://github.com/pytorch/fairseq/Expert1", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/lewis21a.html", "aff_unique_index": "0;0;0+1;0;0+1", "aff_unique_norm": "Meta;University of Washington", "aff_unique_dep": "Facebook AI Research;", "aff_unique_url": "https://research.facebook.com;https://www.washington.edu", "aff_unique_abbr": "FAIR;UW", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0+0;0;0+0", "aff_country_unique": "United States" }, { "title": "BASGD: Buffered Asynchronous SGD for Byzantine Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8819", "id": "8819", "proceeding": "http://proceedings.mlr.press/v139/yang21e.html", "slides": "/media/icml-2021/Slides/8819.pdf", "author_site": "Yi-Rui Yang, Wu-Jun Li", "author": "Yi-Rui Yang; Wu-Jun Li", "abstract": "Distributed learning has become a hot research topic due to its wide application in cluster-based large-scale learning, federated learning, edge computing and so on. Most traditional distributed learning methods typically assume no failure or attack. However, many unexpected cases, such as communication failure and even malicious attack, may happen in real applications. Hence, Byzantine learning (BL), which refers to distributed learning with failure or attack, has recently attracted much attention. Most existing BL methods are synchronous, which are impractical in some applications due to heterogeneous or offline workers. In these cases, asynchronous BL (ABL) is usually preferred. In this paper, we propose a novel method, called buffered asynchronous stochastic gradient descent (BASGD), for ABL. To the best of our knowledge, BASGD is the first ABL method that can resist malicious attack without storing any instances on server. Compared with those methods which need to store instances on server, BASGD has a wider scope of application. BASGD is proved to be convergent, and be able to resist failure or attack. Empirical results show that BASGD significantly outperforms vanilla asynchronous stochastic gradient descent (ASGD) and other ABL baselines when there exists failure or attack on workers.", "bibtex": "@InProceedings{pmlr-v139-yang21e,\n title = \t {BASGD: Buffered Asynchronous SGD for Byzantine Learning},\n author = {Yang, Yi-Rui and Li, Wu-Jun},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11751--11761},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yang21e/yang21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/yang21e.html},\n abstract = \t {Distributed learning has become a hot research topic due to its wide application in cluster-based large-scale learning, federated learning, edge computing and so on. 
Most traditional distributed learning methods typically assume no failure or attack. However, many unexpected cases, such as communication failure and even malicious attack, may happen in real applications. Hence, Byzantine learning (BL), which refers to distributed learning with failure or attack, has recently attracted much attention. Most existing BL methods are synchronous, which are impractical in some applications due to heterogeneous or offline workers. In these cases, asynchronous BL (ABL) is usually preferred. In this paper, we propose a novel method, called buffered asynchronous stochastic gradient descent (BASGD), for ABL. To the best of our knowledge, BASGD is the first ABL method that can resist malicious attack without storing any instances on server. Compared with those methods which need to store instances on server, BASGD has a wider scope of application. BASGD is proved to be convergent, and be able to resist failure or attack. Empirical results show that BASGD significantly outperforms vanilla asynchronous stochastic gradient descent (ASGD) and other ABL baselines when there exists failure or attack on workers.}\n}", "pdf": "http://proceedings.mlr.press/v139/yang21e/yang21e.pdf", "supp": "", "pdf_size": 474682, "gs_citation": 35, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9478687568362106264&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 5, "aff": "National Key Laboratory for Novel Software Technology, Department of Computer Science and Technology, Nanjing University, China; National Key Laboratory for Novel Software Technology, Department of Computer Science and Technology, Nanjing University, China", "aff_domain": "nju.edu.cn;nju.edu.cn", "email": "nju.edu.cn;nju.edu.cn", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/yang21e.html", "aff_unique_index": "0;0", "aff_unique_norm": "Nanjing University", "aff_unique_dep": "Department of Computer Science and Technology", "aff_unique_url": "http://www.nju.edu.cn", "aff_unique_abbr": "Nanjing U", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "China" }, { "title": "BORE: Bayesian Optimization by Density-Ratio Estimation", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10201", "id": "10201", "proceeding": "http://proceedings.mlr.press/v139/tiao21a.html", "slides": "", "author_site": "Louis Chi-Chun Tiao, Aaron Klein, Matthias W Seeger, Edwin V Bonilla, Cedric Archambeau, Fabio Ramos", "author": "Louis C Tiao; Aaron Klein; Matthias W Seeger; Edwin V. Bonilla; Cedric Archambeau; Fabio Ramos", "abstract": "Bayesian optimization (BO) is among the most effective and widely-used blackbox optimization methods. BO proposes solutions according to an explore-exploit trade-off criterion encoded in an acquisition function, many of which are computed from the posterior predictive of a probabilistic surrogate model. Prevalent among these is the expected improvement (EI). The need to ensure analytical tractability of the predictive often poses limitations that can hinder the efficiency and applicability of BO. In this paper, we cast the computation of EI as a binary classification problem, building on the link between class-probability estimation and density-ratio estimation, and the lesser-known link between density-ratios and EI. 
By circumventing the tractability constraints, this reformulation provides numerous advantages, not least in terms of expressiveness, versatility, and scalability.", "bibtex": "@InProceedings{pmlr-v139-tiao21a,\n title = \t {BORE: Bayesian Optimization by Density-Ratio Estimation},\n author = {Tiao, Louis C and Klein, Aaron and Seeger, Matthias W and Bonilla, Edwin V. and Archambeau, Cedric and Ramos, Fabio},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10289--10300},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tiao21a/tiao21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/tiao21a.html},\n abstract = \t {Bayesian optimization (BO) is among the most effective and widely-used blackbox optimization methods. BO proposes solutions according to an explore-exploit trade-off criterion encoded in an acquisition function, many of which are computed from the posterior predictive of a probabilistic surrogate model. Prevalent among these is the expected improvement (EI). The need to ensure analytical tractability of the predictive often poses limitations that can hinder the efficiency and applicability of BO. In this paper, we cast the computation of EI as a binary classification problem, building on the link between class-probability estimation and density-ratio estimation, and the lesser-known link between density-ratios and EI. By circumventing the tractability constraints, this reformulation provides numerous advantages, not least in terms of expressiveness, versatility, and scalability.}\n}", "pdf": "http://proceedings.mlr.press/v139/tiao21a/tiao21a.pdf", "supp": "", "pdf_size": 918673, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4359651384047206579&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "University of Sydney; CSIRO\u2019s Data61; Amazon; CSIRO\u2019s Data61; Amazon; University of Sydney+NVIDIA", "aff_domain": "sydney.edu.au; ; ; ; ; ", "email": "sydney.edu.au; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/tiao21a.html", "aff_unique_index": "0;1;2;1;2;0+3", "aff_unique_norm": "University of Sydney;CSIRO;Amazon;NVIDIA", "aff_unique_dep": ";Data61;Amazon.com, Inc.;NVIDIA Corporation", "aff_unique_url": "https://www.sydney.edu.au;https://www.csiro.au;https://www.amazon.com;https://www.nvidia.com", "aff_unique_abbr": "USYD;CSIRO;Amazon;NVIDIA", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;1;0;1;0+1", "aff_country_unique": "Australia;United States" }, { "title": "Backdoor Scanning for Deep Neural Networks through K-Arm Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10181", "id": "10181", "proceeding": "http://proceedings.mlr.press/v139/shen21c.html", "slides": "", "author_site": "Guangyu Shen, Yingqi Liu, Guanhong Tao, Shengwei An, Qiuling Xu, Siyuan Cheng, Shiqing Ma, Xiangyu Zhang", "author": "Guangyu Shen; Yingqi Liu; Guanhong Tao; Shengwei An; Qiuling Xu; Siyuan Cheng; Shiqing Ma; Xiangyu Zhang", "abstract": "Back-door attack poses a severe threat to deep learning systems. It injects hidden malicious behaviors to a model such that any input stamped with a special pattern can trigger such behaviors. 
Detecting back-door is hence of pressing need. Many existing defense techniques use optimization to generate the smallest input pattern that forces the model to misclassify a set of benign inputs injected with the pattern to a target label. However, the complexity is quadratic to the number of class labels such that they can hardly handle models with many classes. Inspired by Multi-Arm Bandit in Reinforcement Learning, we propose a K-Arm optimization method for backdoor detection. By iteratively and stochastically selecting the most promising labels for optimization with the guidance of an objective function, we substantially reduce the complexity, allowing to handle models with many classes. Moreover, by iteratively refining the selection of labels to optimize, it substantially mitigates the uncertainty in choosing the right labels, improving detection accuracy. At the time of submission, the evaluation of our method on over 4000 models in the IARPA TrojAI competition from round 1 to the latest round 4 achieves top performance on the leaderboard. Our technique also supersedes five state-of-the-art techniques in terms of accuracy and the scanning time needed. The code of our work is available at https://github.com/PurduePAML/K-ARM_Backdoor_Optimization", "bibtex": "@InProceedings{pmlr-v139-shen21c,\n title = \t {Backdoor Scanning for Deep Neural Networks through K-Arm Optimization},\n author = {Shen, Guangyu and Liu, Yingqi and Tao, Guanhong and An, Shengwei and Xu, Qiuling and Cheng, Siyuan and Ma, Shiqing and Zhang, Xiangyu},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9525--9536},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/shen21c/shen21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/shen21c.html},\n abstract = \t {Back-door attack poses a severe threat to deep learning systems. It injects hidden malicious behaviors to a model such that any input stamped with a special pattern can trigger such behaviors. Detecting back-door is hence of pressing need. Many existing defense techniques use optimization to generate the smallest input pattern that forces the model to misclassify a set of benign inputs injected with the pattern to a target label. However, the complexity is quadratic to the number of class labels such that they can hardly handle models with many classes. Inspired by Multi-Arm Bandit in Reinforcement Learning, we propose a K-Arm optimization method for backdoor detection. By iteratively and stochastically selecting the most promising labels for optimization with the guidance of an objective function, we substantially reduce the complexity, allowing to handle models with many classes. Moreover, by iteratively refining the selection of labels to optimize, it substantially mitigates the uncertainty in choosing the right labels, improving detection accuracy. At the time of submission, the evaluation of our method on over 4000 models in the IARPA TrojAI competition from round 1 to the latest round 4 achieves top performance on the leaderboard. Our technique also supersedes five state-of-the-art techniques in terms of accuracy and the scanning time needed. 
The code of our work is available at https://github.com/PurduePAML/K-ARM_Backdoor_Optimization}\n}", "pdf": "http://proceedings.mlr.press/v139/shen21c/shen21c.pdf", "supp": "", "pdf_size": 1359894, "gs_citation": 139, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18424002237979010229&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science, Purdue University; Department of Computer Science, Purdue University; Department of Computer Science, Purdue University; Department of Computer Science, Purdue University; Department of Computer Science, Purdue University; Department of Computer Science, Purdue University; Department of Computer Science, Rutgers University; Department of Computer Science, Purdue University", "aff_domain": "purdue.edu; ; ; ; ; ; ; ", "email": "purdue.edu; ; ; ; ; ; ; ", "github": "https://github.com/PurduePAML/K-ARM_Backdoor_Optimization", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/shen21c.html", "aff_unique_index": "0;0;0;0;0;0;1;0", "aff_unique_norm": "Purdue University;Rutgers University", "aff_unique_dep": "Department of Computer Science;Department of Computer Science", "aff_unique_url": "https://www.purdue.edu;https://www.rutgers.edu", "aff_unique_abbr": "Purdue;Rutgers", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Backpropagated Neighborhood Aggregation for Accurate Training of Spiking Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9057", "id": "9057", "proceeding": "http://proceedings.mlr.press/v139/yang21n.html", "slides": "/media/icml-2021/Slides/9057.pdf", "author_site": "Yukun Yang, Wenrui Zhang, Peng Li", "author": "Yukun Yang; Wenrui Zhang; Peng Li", "abstract": "While Backpropagation (BP) has been applied to spiking neural networks (SNNs) achieving encouraging results, a key challenge involved is to backpropagate a differentiable continuous-valued loss over layers of spiking neurons exhibiting discontinuous all-or-none firing activities. Existing methods deal with this difficulty by introducing compromises that come with their own limitations, leading to potential performance degradation. We propose a novel BP-like method, called neighborhood aggregation (NA), which computes accurate error gradients guiding weight updates that may lead to discontinuous modifications of firing activities. NA achieves this goal by aggregating the error gradient over multiple spike trains in the neighborhood of the present spike train of each neuron. The employed aggregation is based on a generalized finite difference approximation with a proposed distance metric quantifying the similarity between a given pair of spike trains. 
Our experiments show that the proposed NA algorithm delivers state-of-the-art performance for SNN training on several datasets including CIFAR10.", "bibtex": "@InProceedings{pmlr-v139-yang21n,\n title = \t {Backpropagated Neighborhood Aggregation for Accurate Training of Spiking Neural Networks},\n author = {Yang, Yukun and Zhang, Wenrui and Li, Peng},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11852--11862},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yang21n/yang21n.pdf},\n url = \t {https://proceedings.mlr.press/v139/yang21n.html},\n abstract = \t {While Backpropagation (BP) has been applied to spiking neural networks (SNNs) achieving encouraging results, a key challenge involved is to backpropagate a differentiable continuous-valued loss over layers of spiking neurons exhibiting discontinuous all-or-none firing activities. Existing methods deal with this difficulty by introducing compromises that come with their own limitations, leading to potential performance degradation. We propose a novel BP-like method, called neighborhood aggregation (NA), which computes accurate error gradients guiding weight updates that may lead to discontinuous modifications of firing activities. NA achieves this goal by aggregating the error gradient over multiple spike trains in the neighborhood of the present spike train of each neuron. The employed aggregation is based on a generalized finite difference approximation with a proposed distance metric quantifying the similarity between a given pair of spike trains. 
Our experiments show that the proposed NA algorithm delivers state-of-the-art performance for SNN training on several datasets including CIFAR10.}\n}", "pdf": "http://proceedings.mlr.press/v139/yang21n/yang21n.pdf", "supp": "", "pdf_size": 1688190, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5085864001342469497&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Electrical and Computer Engineering, University of California, Santa Barbara, CA 93106; Department of Electrical and Computer Engineering, University of California, Santa Barbara, CA 93106; Department of Electrical and Computer Engineering, University of California, Santa Barbara, CA 93106", "aff_domain": "ucsb.edu;ucsb.edu;ucsb.edu", "email": "ucsb.edu;ucsb.edu;ucsb.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/yang21n.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, Santa Barbara", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.ucsb.edu", "aff_unique_abbr": "UCSB", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Santa Barbara", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Barlow Twins: Self-Supervised Learning via Redundancy Reduction", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10299", "id": "10299", "proceeding": "http://proceedings.mlr.press/v139/zbontar21a.html", "slides": "/media/icml-2021/Slides/10299.pdf", "author_site": "Jure Zbontar, Li Jing, Ishan Misra, yann lecun, Stephane Deny", "author": "Jure Zbontar; Li Jing; Ishan Misra; Yann LeCun; Stephane Deny", "abstract": "Self-supervised learning (SSL) is rapidly closing the gap with supervised methods on large computer vision benchmarks. A successful approach to SSL is to learn embeddings which are invariant to distortions of the input sample. However, a recurring issue with this approach is the existence of trivial constant solutions. Most current methods avoid such solutions by careful implementation details. We propose an objective function that naturally avoids collapse by measuring the cross-correlation matrix between the outputs of two identical networks fed with distorted versions of a sample, and making it as close to the identity matrix as possible. This causes the embedding vectors of distorted versions of a sample to be similar, while minimizing the redundancy between the components of these vectors. The method is called Barlow Twins, owing to neuroscientist H. Barlow\u2019s redundancy-reduction principle applied to a pair of identical networks. Barlow Twins does not require large batches nor asymmetry between the network twins such as a predictor network, gradient stopping, or a moving average on the weight updates. Intriguingly it benefits from very high-dimensional output vectors. 
Barlow Twins outperforms previous methods on ImageNet for semi-supervised classification in the low-data regime, and is on par with current state of the art for ImageNet classification with a linear classifier head, and for transfer tasks of classification and object detection.", "bibtex": "@InProceedings{pmlr-v139-zbontar21a,\n title = \t {Barlow Twins: Self-Supervised Learning via Redundancy Reduction},\n author = {Zbontar, Jure and Jing, Li and Misra, Ishan and LeCun, Yann and Deny, Stephane},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12310--12320},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zbontar21a/zbontar21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zbontar21a.html},\n abstract = \t {Self-supervised learning (SSL) is rapidly closing the gap with supervised methods on large computer vision benchmarks. A successful approach to SSL is to learn embeddings which are invariant to distortions of the input sample. However, a recurring issue with this approach is the existence of trivial constant solutions. Most current methods avoid such solutions by careful implementation details. We propose an objective function that naturally avoids collapse by measuring the cross-correlation matrix between the outputs of two identical networks fed with distorted versions of a sample, and making it as close to the identity matrix as possible. This causes the embedding vectors of distorted versions of a sample to be similar, while minimizing the redundancy between the components of these vectors. The method is called Barlow Twins, owing to neuroscientist H. Barlow\u2019s redundancy-reduction principle applied to a pair of identical networks. Barlow Twins does not require large batches nor asymmetry between the network twins such as a predictor network, gradient stopping, or a moving average on the weight updates. Intriguingly it benefits from very high-dimensional output vectors. 
Barlow Twins outperforms previous methods on ImageNet for semi-supervised classification in the low-data regime, and is on par with current state of the art for ImageNet classification with a linear classifier head, and for transfer tasks of classification and object detection.}\n}", "pdf": "http://proceedings.mlr.press/v139/zbontar21a/zbontar21a.pdf", "supp": "", "pdf_size": 2245664, "gs_citation": 2939, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5159677840794766125&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Facebook AI Research+New York University; Facebook AI Research+New York University; Facebook AI Research; Facebook AI Research+New York University; Facebook AI Research", "aff_domain": "fb.com;fb.com;fb.com;fb.com;gmail.com", "email": "fb.com;fb.com;fb.com;fb.com;gmail.com", "github": "https://github.com/facebookresearch/barlowtwins", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/zbontar21a.html", "aff_unique_index": "0+1;0+1;0;0+1;0", "aff_unique_norm": "Meta;New York University", "aff_unique_dep": "Facebook AI Research;", "aff_unique_url": "https://research.facebook.com;https://www.nyu.edu", "aff_unique_abbr": "FAIR;NYU", "aff_campus_unique_index": ";;", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0+0;0;0+0;0", "aff_country_unique": "United States" }, { "title": "BasisDeVAE: Interpretable Simultaneous Dimensionality Reduction and Feature-Level Clustering with Derivative-Based Variational Autoencoders", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8543", "id": "8543", "proceeding": "http://proceedings.mlr.press/v139/danks21a.html", "slides": "/media/icml-2021/Slides/8543.pdf", "author_site": "Dominic Danks, Christopher Yau", "author": "Dominic Danks; Christopher Yau", "abstract": "The Variational Autoencoder (VAE) performs effective nonlinear dimensionality reduction in a variety of problem settings. However, the black-box neural network decoder function typically employed limits the ability of the decoder function to be constrained and interpreted, making the use of VAEs problematic in settings where prior knowledge should be embedded within the decoder. We present DeVAE, a novel VAE-based model with a derivative-based forward mapping, allowing for greater control over decoder behaviour via specification of the decoder function in derivative space. Additionally, we show how DeVAE can be paired with a sparse clustering prior to create BasisDeVAE and perform interpretable simultaneous dimensionality reduction and feature-level clustering. 
We demonstrate the performance and scalability of the DeVAE and BasisDeVAE models on synthetic and real-world data and present how the derivative-based approach allows for expressive yet interpretable forward models which respect prior knowledge.", "bibtex": "@InProceedings{pmlr-v139-danks21a,\n title = \t {BasisDeVAE: Interpretable Simultaneous Dimensionality Reduction and Feature-Level Clustering with Derivative-Based Variational Autoencoders},\n author = {Danks, Dominic and Yau, Christopher},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2410--2420},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/danks21a/danks21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/danks21a.html},\n abstract = \t {The Variational Autoencoder (VAE) performs effective nonlinear dimensionality reduction in a variety of problem settings. However, the black-box neural network decoder function typically employed limits the ability of the decoder function to be constrained and interpreted, making the use of VAEs problematic in settings where prior knowledge should be embedded within the decoder. We present DeVAE, a novel VAE-based model with a derivative-based forward mapping, allowing for greater control over decoder behaviour via specification of the decoder function in derivative space. Additionally, we show how DeVAE can be paired with a sparse clustering prior to create BasisDeVAE and perform interpretable simultaneous dimensionality reduction and feature-level clustering. We demonstrate the performance and scalability of the DeVAE and BasisDeVAE models on synthetic and real-world data and present how the derivative-based approach allows for expressive yet interpretable forward models which respect prior knowledge.}\n}", "pdf": "http://proceedings.mlr.press/v139/danks21a/danks21a.pdf", "supp": "", "pdf_size": 3993972, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11166443174416482669&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Institute of Cancer and Genomic Sciences, University of Birmingham, Birmingham, UK+The Alan Turing Institute, London, UK+Division of Informatics, Imaging & Data Sciences, Unversity of Manchester, Manchester, UK+Health Data Research UK, London, UK; The Alan Turing Institute, London, UK+Division of Informatics, Imaging & Data Sciences, Unversity of Manchester, Manchester, UK+Health Data Research UK, London, UK", "aff_domain": "turing.ac.uk; ", "email": "turing.ac.uk; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/danks21a.html", "aff_unique_index": "0+1+2+3;1+2+3", "aff_unique_norm": "University of Birmingham;Alan Turing Institute;University of Manchester;Health Data Research UK", "aff_unique_dep": "Institute of Cancer and Genomic Sciences;;Division of Informatics, Imaging & Data Sciences;", "aff_unique_url": "https://www.birmingham.ac.uk;https://www.turing.ac.uk;https://www.manchester.ac.uk;https://www.hdruk.ac.uk", "aff_unique_abbr": "UoB;ATI;UoM;HDR UK", "aff_campus_unique_index": "0+1+2+1;1+2+1", "aff_campus_unique": "Birmingham;London;Manchester", "aff_country_unique_index": "0+0+0+0;0+0+0", "aff_country_unique": "United Kingdom" }, { "title": "Batch Value-function Approximation with Only Realizability", "status": "Spotlight", "track": 
"main", "site": "https://icml.cc/virtual/2021/poster/9079", "id": "9079", "proceeding": "http://proceedings.mlr.press/v139/xie21d.html", "slides": "/media/icml-2021/Slides/9079.pdf", "author_site": "Tengyang Xie, Nan Jiang", "author": "Tengyang Xie; Nan Jiang", "abstract": "We make progress in a long-standing problem of batch reinforcement learning (RL): learning Q* from an exploratory and polynomial-sized dataset, using a realizable and otherwise arbitrary function class. In fact, all existing algorithms demand function-approximation assumptions stronger than realizability, and the mounting negative evidence has led to a conjecture that sample-efficient learning is impossible in this setting (Chen & Jiang, 2019). Our algorithm, BVFT, breaks the hardness conjecture (albeit under a stronger notion of exploratory data) via a tournament procedure that reduces the learning problem to pairwise comparison, and solves the latter with the help of a state-action-space partition constructed from the compared functions. We also discuss how BVFT can be applied to model selection among other extensions and open problems.", "bibtex": "@InProceedings{pmlr-v139-xie21d,\n title = \t {Batch Value-function Approximation with Only Realizability},\n author = {Xie, Tengyang and Jiang, Nan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11404--11413},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xie21d/xie21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/xie21d.html},\n abstract = \t {We make progress in a long-standing problem of batch reinforcement learning (RL): learning Q* from an exploratory and polynomial-sized dataset, using a realizable and otherwise arbitrary function class. In fact, all existing algorithms demand function-approximation assumptions stronger than realizability, and the mounting negative evidence has led to a conjecture that sample-efficient learning is impossible in this setting (Chen & Jiang, 2019). Our algorithm, BVFT, breaks the hardness conjecture (albeit under a stronger notion of exploratory data) via a tournament procedure that reduces the learning problem to pairwise comparison, and solves the latter with the help of a state-action-space partition constructed from the compared functions. 
We also discuss how BVFT can be applied to model selection among other extensions and open problems.}\n}", "pdf": "http://proceedings.mlr.press/v139/xie21d/xie21d.pdf", "supp": "", "pdf_size": 359638, "gs_citation": 141, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4762926560369182403&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, University of Illinois at Urbana-Champaign, Illinois, USA; Department of Computer Science, University of Illinois at Urbana-Champaign, Illinois, USA", "aff_domain": "illinois.edu;illinois.edu", "email": "illinois.edu;illinois.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/xie21d.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Illinois Urbana-Champaign", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://illinois.edu", "aff_unique_abbr": "UIUC", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Urbana-Champaign", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Bayesian Algorithm Execution: Estimating Computable Properties of Black-box Functions Using Mutual Information", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10675", "id": "10675", "proceeding": "http://proceedings.mlr.press/v139/neiswanger21a.html", "slides": "/media/icml-2021/Slides/10675.pdf", "author_site": "Willie Neiswanger, Ke Alexander Wang, Stefano Ermon", "author": "Willie Neiswanger; Ke Alexander Wang; Stefano Ermon", "abstract": "In many real world problems, we want to infer some property of an expensive black-box function f, given a budget of T function evaluations. One example is budget constrained global optimization of f, for which Bayesian optimization is a popular method. Other properties of interest include local optima, level sets, integrals, or graph-structured information induced by f. Often, we can find an algorithm A to compute the desired property, but it may require far more than T queries to execute. Given such an A, and a prior distribution over f, we refer to the problem of inferring the output of A using T evaluations as Bayesian Algorithm Execution (BAX). To tackle this problem, we present a procedure, InfoBAX, that sequentially chooses queries that maximize mutual information with respect to the algorithm\u2019s output. Applying this to Dijkstra\u2019s algorithm, for instance, we infer shortest paths in synthetic and real-world graphs with black-box edge costs. Using evolution strategies, we yield variants of Bayesian optimization that target local, rather than global, optima. On these problems, InfoBAX uses up to 500 times fewer queries to f than required by the original algorithm. 
Our method is closely connected to other Bayesian optimal experimental design procedures such as entropy search methods and optimal sensor placement using Gaussian processes.", "bibtex": "@InProceedings{pmlr-v139-neiswanger21a,\n title = \t {Bayesian Algorithm Execution: Estimating Computable Properties of Black-box Functions Using Mutual Information},\n author = {Neiswanger, Willie and Wang, Ke Alexander and Ermon, Stefano},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8005--8015},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/neiswanger21a/neiswanger21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/neiswanger21a.html},\n abstract = \t {In many real world problems, we want to infer some property of an expensive black-box function f, given a budget of T function evaluations. One example is budget constrained global optimization of f, for which Bayesian optimization is a popular method. Other properties of interest include local optima, level sets, integrals, or graph-structured information induced by f. Often, we can find an algorithm A to compute the desired property, but it may require far more than T queries to execute. Given such an A, and a prior distribution over f, we refer to the problem of inferring the output of A using T evaluations as Bayesian Algorithm Execution (BAX). To tackle this problem, we present a procedure, InfoBAX, that sequentially chooses queries that maximize mutual information with respect to the algorithm\u2019s output. Applying this to Dijkstra\u2019s algorithm, for instance, we infer shortest paths in synthetic and real-world graphs with black-box edge costs. Using evolution strategies, we yield variants of Bayesian optimization that target local, rather than global, optima. On these problems, InfoBAX uses up to 500 times fewer queries to f than required by the original algorithm. 
Our method is closely connected to other Bayesian optimal experimental design procedures such as entropy search methods and optimal sensor placement using Gaussian processes.}\n}", "pdf": "http://proceedings.mlr.press/v139/neiswanger21a/neiswanger21a.pdf", "supp": "", "pdf_size": 1627618, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10668214102939988393&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Stanford University, Computer Science Department; Stanford University, Computer Science Department; Stanford University, Computer Science Department", "aff_domain": "cs.stanford.edu; ; ", "email": "cs.stanford.edu; ; ", "github": "", "project": "https://arxiv.org/abs/2104.09460", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/neiswanger21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Computer Science Department", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Bayesian Attention Belief Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9005", "id": "9005", "proceeding": "http://proceedings.mlr.press/v139/zhang21f.html", "slides": "", "author_site": "Shujian Zhang, Xinjie Fan, Bo Chen, Mingyuan Zhou", "author": "Shujian Zhang; Xinjie Fan; Bo Chen; Mingyuan Zhou", "abstract": "Attention-based neural networks have achieved state-of-the-art results on a wide range of tasks. Most such models use deterministic attention while stochastic attention is less explored due to the optimization difficulties or complicated model design. This paper introduces Bayesian attention belief networks, which construct a decoder network by modeling unnormalized attention weights with a hierarchy of gamma distributions, and an encoder network by stacking Weibull distributions with a deterministic-upward-stochastic-downward structure to approximate the posterior. The resulting auto-encoding networks can be optimized in a differentiable way with a variational lower bound. It is simple to convert any models with deterministic attention, including pretrained ones, to the proposed Bayesian attention belief networks. On a variety of language understanding tasks, we show that our method outperforms deterministic attention and state-of-the-art stochastic attention in accuracy, uncertainty estimation, generalization across domains, and robustness to adversarial attacks. 
We further demonstrate the general applicability of our method on neural machine translation and visual question answering, showing great potential of incorporating our method into various attention-related tasks.", "bibtex": "@InProceedings{pmlr-v139-zhang21f,\n title = \t {Bayesian Attention Belief Networks},\n author = {Zhang, Shujian and Fan, Xinjie and Chen, Bo and Zhou, Mingyuan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12413--12426},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21f/zhang21f.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21f.html},\n abstract = \t {Attention-based neural networks have achieved state-of-the-art results on a wide range of tasks. Most such models use deterministic attention while stochastic attention is less explored due to the optimization difficulties or complicated model design. This paper introduces Bayesian attention belief networks, which construct a decoder network by modeling unnormalized attention weights with a hierarchy of gamma distributions, and an encoder network by stacking Weibull distributions with a deterministic-upward-stochastic-downward structure to approximate the posterior. The resulting auto-encoding networks can be optimized in a differentiable way with a variational lower bound. It is simple to convert any models with deterministic attention, including pretrained ones, to the proposed Bayesian attention belief networks. On a variety of language understanding tasks, we show that our method outperforms deterministic attention and state-of-the-art stochastic attention in accuracy, uncertainty estimation, generalization across domains, and robustness to adversarial attacks. 
We further demonstrate the general applicability of our method on neural machine translation and visual question answering, showing great potential of incorporating our method into various attention-related tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21f/zhang21f.pdf", "supp": "", "pdf_size": 745651, "gs_citation": 34, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6520539785047236497&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "The University of Texas at Austin; The University of Texas at Austin; Xidian University; The University of Texas at Austin", "aff_domain": "utexas.edu;utexas.edu;xidian.edu.cn;mccombs.utexas.edu", "email": "utexas.edu;utexas.edu;xidian.edu.cn;mccombs.utexas.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/zhang21f.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "University of Texas at Austin;Xidian University", "aff_unique_dep": ";", "aff_unique_url": "https://www.utexas.edu;http://www.xidian.edu.cn/", "aff_unique_abbr": "UT Austin;Xidian", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Austin;", "aff_country_unique_index": "0;0;1;0", "aff_country_unique": "United States;China" }, { "title": "Bayesian Deep Learning via Subnetwork Inference", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9939", "id": "9939", "proceeding": "http://proceedings.mlr.press/v139/daxberger21a.html", "slides": "/media/icml-2021/Slides/9939.pdf", "author_site": "Erik Daxberger, Eric Nalisnick, James Allingham, Javier Antor\u00e1n, Jose Miguel Hernandez-Lobato", "author": "Erik Daxberger; Eric Nalisnick; James U Allingham; Javier Antoran; Jose Miguel Hernandez-Lobato", "abstract": "The Bayesian paradigm has the potential to solve core issues of deep neural networks such as poor calibration and data inefficiency. Alas, scaling Bayesian inference to large weight spaces often requires restrictive approximations. In this work, we show that it suffices to perform inference over a small subset of model weights in order to obtain accurate predictive posteriors. The other weights are kept as point estimates. This subnetwork inference framework enables us to use expressive, otherwise intractable, posterior approximations over such subsets. In particular, we implement subnetwork linearized Laplace as a simple, scalable Bayesian deep learning method: We first obtain a MAP estimate of all weights and then infer a full-covariance Gaussian posterior over a subnetwork using the linearized Laplace approximation. We propose a subnetwork selection strategy that aims to maximally preserve the model\u2019s predictive uncertainty. 
Empirically, our approach compares favorably to ensembles and less expressive posterior approximations over full networks.", "bibtex": "@InProceedings{pmlr-v139-daxberger21a,\n title = \t {Bayesian Deep Learning via Subnetwork Inference},\n author = {Daxberger, Erik and Nalisnick, Eric and Allingham, James U and Antoran, Javier and Hernandez-Lobato, Jose Miguel},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2510--2521},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/daxberger21a/daxberger21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/daxberger21a.html},\n abstract = \t {The Bayesian paradigm has the potential to solve core issues of deep neural networks such as poor calibration and data inefficiency. Alas, scaling Bayesian inference to large weight spaces often requires restrictive approximations. In this work, we show that it suffices to perform inference over a small subset of model weights in order to obtain accurate predictive posteriors. The other weights are kept as point estimates. This subnetwork inference framework enables us to use expressive, otherwise intractable, posterior approximations over such subsets. In particular, we implement subnetwork linearized Laplace as a simple, scalable Bayesian deep learning method: We first obtain a MAP estimate of all weights and then infer a full-covariance Gaussian posterior over a subnetwork using the linearized Laplace approximation. We propose a subnetwork selection strategy that aims to maximally preserve the model\u2019s predictive uncertainty. 
Empirically, our approach compares favorably to ensembles and less expressive posterior approximations over full networks.}\n}", "pdf": "http://proceedings.mlr.press/v139/daxberger21a/daxberger21a.pdf", "supp": "", "pdf_size": 2126361, "gs_citation": 127, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4967391317568444060&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "University of Cambridge + Max Planck Institute for Intelligent Systems, T\u00fcbingen; University of Amsterdam; University of Cambridge; University of Cambridge; University of Cambridge + Microsoft Research + The Alan Turing Institute", "aff_domain": "cam.ac.uk; ; ; ; ", "email": "cam.ac.uk; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/daxberger21a.html", "aff_unique_index": "0+1;2;0;0;0+3+4", "aff_unique_norm": "University of Cambridge;Max Planck Institute for Intelligent Systems;University of Amsterdam;Microsoft;Alan Turing Institute", "aff_unique_dep": ";;;Microsoft Research;", "aff_unique_url": "https://www.cam.ac.uk;https://www.mpi-is.mpg.de;https://www.uva.nl;https://www.microsoft.com/en-us/research;https://www.turing.ac.uk", "aff_unique_abbr": "Cambridge;MPI-IS;UvA;MSR;ATI", "aff_campus_unique_index": "0+1;0;0;0", "aff_campus_unique": "Cambridge;T\u00fcbingen;", "aff_country_unique_index": "0+1;2;0;0;0+3+0", "aff_country_unique": "United Kingdom;Germany;Netherlands;United States" }, { "title": "Bayesian Optimistic Optimisation with Exponentially Decaying Regret", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9763", "id": "9763", "proceeding": "http://proceedings.mlr.press/v139/tran-the21a.html", "slides": "", "author_site": "Hung Tran-The, Sunil Gupta, Santu Rana, Svetha Venkatesh", "author": "Hung Tran-The; Sunil Gupta; Santu Rana; Svetha Venkatesh", "abstract": "Bayesian optimisation (BO) is a well known algorithm for finding the global optimum of expensive, black-box functions. The current practical BO algorithms have regret bounds ranging from $\\mathcal{O}(\\frac{logN}{\\sqrt{N}})$ to $\\mathcal O(e^{-\\sqrt{N}})$, where $N$ is the number of evaluations. This paper explores the possibility of improving the regret bound in the noise-free setting by intertwining concepts from BO and optimistic optimisation methods which are based on partitioning the search space. We propose the BOO algorithm, a first practical approach which can achieve an exponential regret bound with order $\\mathcal O(N^{-\\sqrt{N}})$ under the assumption that the objective function is sampled from a Gaussian process with a Mat\u00e9rn kernel with smoothness parameter $\\nu > 4 +\\frac{D}{2}$, where $D$ is the number of dimensions. 
We perform experiments on optimisation of various synthetic functions and machine learning hyperparameter tuning tasks and show that our algorithm outperforms baselines.", "bibtex": "@InProceedings{pmlr-v139-tran-the21a,\n title = \t {Bayesian Optimistic Optimisation with Exponentially Decaying Regret},\n author = {Tran-The, Hung and Gupta, Sunil and Rana, Santu and Venkatesh, Svetha},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10390--10400},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tran-the21a/tran-the21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/tran-the21a.html},\n abstract = \t {Bayesian optimisation (BO) is a well known algorithm for finding the global optimum of expensive, black-box functions. The current practical BO algorithms have regret bounds ranging from $\\mathcal{O}(\\frac{logN}{\\sqrt{N}})$ to $\\mathcal O(e^{-\\sqrt{N}})$, where $N$ is the number of evaluations. This paper explores the possibility of improving the regret bound in the noise-free setting by intertwining concepts from BO and optimistic optimisation methods which are based on partitioning the search space. We propose the BOO algorithm, a first practical approach which can achieve an exponential regret bound with order $\\mathcal O(N^{-\\sqrt{N}})$ under the assumption that the objective function is sampled from a Gaussian process with a Mat\u00e9rn kernel with smoothness parameter $\\nu > 4 +\\frac{D}{2}$, where $D$ is the number of dimensions. We perform experiments on optimisation of various synthetic functions and machine learning hyperparameter tuning tasks and show that our algorithm outperforms baselines.}\n}", "pdf": "http://proceedings.mlr.press/v139/tran-the21a/tran-the21a.pdf", "supp": "", "pdf_size": 6381087, "gs_citation": 0, "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:y1h31Jgtz8EJ:scholar.google.com/&scioq=Bayesian+Optimistic+Optimisation+with+Exponentially+Decaying+Regret&hl=en&as_sdt=0,5", "gs_version_total": 5, "aff": "Applied Arti\ufb01cial Intelligence Institute, Deakin University, Geelong, Australia; Applied Arti\ufb01cial Intelligence Institute, Deakin University, Geelong, Australia; Applied Arti\ufb01cial Intelligence Institute, Deakin University, Geelong, Australia; Applied Arti\ufb01cial Intelligence Institute, Deakin University, Geelong, Australia", "aff_domain": "deakin.edu.au; ; ; ", "email": "deakin.edu.au; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/tran-the21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Deakin University", "aff_unique_dep": "Applied Arti\ufb01cial Intelligence Institute", "aff_unique_url": "https://www.deakin.edu.au", "aff_unique_abbr": "", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Geelong", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Australia" }, { "title": "Bayesian Optimization over Hybrid Spaces", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9183", "id": "9183", "proceeding": "http://proceedings.mlr.press/v139/deshwal21a.html", "slides": "/media/icml-2021/Slides/9183.pdf", "author_site": "Aryan Deshwal, Syrine Belakaria, Jana Doppa", "author": "Aryan Deshwal; Syrine Belakaria; Janardhan Rao Doppa", "abstract": "We consider the problem 
of optimizing hybrid structures (mixture of discrete and continuous input variables) via expensive black-box function evaluations. This problem arises in many real-world applications. For example, in materials design optimization via lab experiments, discrete and continuous variables correspond to the presence/absence of primitive elements and their relative concentrations respectively. The key challenge is to accurately model the complex interactions between discrete and continuous variables. In this paper, we propose a novel approach referred as Hybrid Bayesian Optimization (HyBO) by utilizing diffusion kernels, which are naturally defined over continuous and discrete variables. We develop a principled approach for constructing diffusion kernels over hybrid spaces by utilizing the additive kernel formulation, which allows additive interactions of all orders in a tractable manner. We theoretically analyze the modeling strength of additive hybrid kernels and prove that it has the universal approximation property. Our experiments on synthetic and six diverse real-world benchmarks show that HyBO significantly outperforms the state-of-the-art methods.", "bibtex": "@InProceedings{pmlr-v139-deshwal21a,\n title = \t {Bayesian Optimization over Hybrid Spaces},\n author = {Deshwal, Aryan and Belakaria, Syrine and Doppa, Janardhan Rao},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2632--2643},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/deshwal21a/deshwal21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/deshwal21a.html},\n abstract = \t {We consider the problem of optimizing hybrid structures (mixture of discrete and continuous input variables) via expensive black-box function evaluations. This problem arises in many real-world applications. For example, in materials design optimization via lab experiments, discrete and continuous variables correspond to the presence/absence of primitive elements and their relative concentrations respectively. The key challenge is to accurately model the complex interactions between discrete and continuous variables. In this paper, we propose a novel approach referred as Hybrid Bayesian Optimization (HyBO) by utilizing diffusion kernels, which are naturally defined over continuous and discrete variables. We develop a principled approach for constructing diffusion kernels over hybrid spaces by utilizing the additive kernel formulation, which allows additive interactions of all orders in a tractable manner. We theoretically analyze the modeling strength of additive hybrid kernels and prove that it has the universal approximation property. 
Our experiments on synthetic and six diverse real-world benchmarks show that HyBO significantly outperforms the state-of-the-art methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/deshwal21a/deshwal21a.pdf", "supp": "", "pdf_size": 5406279, "gs_citation": 53, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10724416920548508977&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "School of EECS, Washington State University, Pullman, USA; School of EECS, Washington State University, Pullman, USA; School of EECS, Washington State University, Pullman, USA", "aff_domain": "wsu.edu; ; ", "email": "wsu.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/deshwal21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Washington State University", "aff_unique_dep": "School of EECS", "aff_unique_url": "https://wsu.edu", "aff_unique_abbr": "WSU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Pullman", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Bayesian Quadrature on Riemannian Data Manifolds", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9655", "id": "9655", "proceeding": "http://proceedings.mlr.press/v139/frohlich21a.html", "slides": "", "author_site": "Christian Fr\u00f6hlich, Alexandra Gessner, Philipp Hennig, Bernhard Sch\u00f6lkopf, Georgios Arvanitidis", "author": "Christian Fr\u00f6hlich; Alexandra Gessner; Philipp Hennig; Bernhard Sch\u00f6lkopf; Georgios Arvanitidis", "abstract": "Riemannian manifolds provide a principled way to model nonlinear geometric structure inherent in data. A Riemannian metric on said manifolds determines geometry-aware shortest paths and provides the means to define statistical models accordingly. However, these operations are typically computationally demanding. To ease this computational burden, we advocate probabilistic numerical methods for Riemannian statistics. In particular, we focus on Bayesian quadrature (BQ) to numerically compute integrals over normal laws on Riemannian manifolds learned from data. In this task, each function evaluation relies on the solution of an expensive initial value problem. We show that by leveraging both prior knowledge and an active exploration scheme, BQ significantly reduces the number of required evaluations and thus outperforms Monte Carlo methods on a wide range of integration problems. As a concrete application, we highlight the merits of adopting Riemannian geometry with our proposed framework on a nonlinear dataset from molecular dynamics.", "bibtex": "@InProceedings{pmlr-v139-frohlich21a,\n title = \t {Bayesian Quadrature on Riemannian Data Manifolds},\n author = {Fr{\\\"o}hlich, Christian and Gessner, Alexandra and Hennig, Philipp and Sch{\\\"o}lkopf, Bernhard and Arvanitidis, Georgios},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3459--3468},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/frohlich21a/frohlich21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/frohlich21a.html},\n abstract = \t {Riemannian manifolds provide a principled way to model nonlinear geometric structure inherent in data. 
A Riemannian metric on said manifolds determines geometry-aware shortest paths and provides the means to define statistical models accordingly. However, these operations are typically computationally demanding. To ease this computational burden, we advocate probabilistic numerical methods for Riemannian statistics. In particular, we focus on Bayesian quadrature (BQ) to numerically compute integrals over normal laws on Riemannian manifolds learned from data. In this task, each function evaluation relies on the solution of an expensive initial value problem. We show that by leveraging both prior knowledge and an active exploration scheme, BQ significantly reduces the number of required evaluations and thus outperforms Monte Carlo methods on a wide range of integration problems. As a concrete application, we highlight the merits of adopting Riemannian geometry with our proposed framework on a nonlinear dataset from molecular dynamics.}\n}", "pdf": "http://proceedings.mlr.press/v139/frohlich21a/frohlich21a.pdf", "supp": "", "pdf_size": 4221658, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14587892748613209913&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "University of T\u00fcbingen, Germany; University of T\u00fcbingen, Germany+Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany; University of T\u00fcbingen, Germany+Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany; Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany; Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany", "aff_domain": "student.uni-tuebingen.de; ; ; ; ", "email": "student.uni-tuebingen.de; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/frohlich21a.html", "aff_unique_index": "0;0+1;0+1;1;1", "aff_unique_norm": "University of T\u00fcbingen;Max Planck Institute for Intelligent Systems", "aff_unique_dep": ";", "aff_unique_url": "https://www.uni-tuebingen.de/;https://www.mpi-is.mpg.de", "aff_unique_abbr": "Uni T\u00fcbingen;MPI-IS", "aff_campus_unique_index": "1;1;1;1", "aff_campus_unique": ";T\u00fcbingen", "aff_country_unique_index": "0;0+0;0+0;0;0", "aff_country_unique": "Germany" }, { "title": "Bayesian Structural Adaptation for Continual Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9931", "id": "9931", "proceeding": "http://proceedings.mlr.press/v139/kumar21a.html", "slides": "", "author_site": "Abhishek Kumar, Sunabha Chatterjee, Piyush Rai", "author": "Abhishek Kumar; Sunabha Chatterjee; Piyush Rai", "abstract": "Continual Learning is a learning paradigm where learning systems are trained on a sequence of tasks. The goal here is to perform well on the current task without suffering from a performance drop on the previous tasks. Two notable directions among the recent advances in continual learning with neural networks are (1) variational Bayes based regularization by learning priors from previous tasks, and, (2) learning the structure of deep networks to adapt to new tasks. So far, these two approaches have been largely orthogonal. We present a novel Bayesian framework based on continually learning the structure of deep neural networks, to unify these distinct yet complementary approaches. 
The proposed framework learns the deep structure for each task by learning which weights to be used, and supports inter-task transfer through the overlapping of different sparse subsets of weights learned by different tasks. An appealing aspect of our proposed continual learning framework is that it is applicable to both discriminative (supervised) and generative (unsupervised) settings. Experimental results on supervised and unsupervised benchmarks demonstrate that our approach performs comparably or better than recent advances in continual learning.", "bibtex": "@InProceedings{pmlr-v139-kumar21a,\n title = \t {Bayesian Structural Adaptation for Continual Learning},\n author = {Kumar, Abhishek and Chatterjee, Sunabha and Rai, Piyush},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5850--5860},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kumar21a/kumar21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kumar21a.html},\n abstract = \t {Continual Learning is a learning paradigm where learning systems are trained on a sequence of tasks. The goal here is to perform well on the current task without suffering from a performance drop on the previous tasks. Two notable directions among the recent advances in continual learning with neural networks are (1) variational Bayes based regularization by learning priors from previous tasks, and, (2) learning the structure of deep networks to adapt to new tasks. So far, these two approaches have been largely orthogonal. We present a novel Bayesian framework based on continually learning the structure of deep neural networks, to unify these distinct yet complementary approaches. The proposed framework learns the deep structure for each task by learning which weights to be used, and supports inter-task transfer through the overlapping of different sparse subsets of weights learned by different tasks. An appealing aspect of our proposed continual learning framework is that it is applicable to both discriminative (supervised) and generative (unsupervised) settings. 
Experimental results on supervised and unsupervised benchmarks demonstrate that our approach performs comparably or better than recent advances in continual learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/kumar21a/kumar21a.pdf", "supp": "", "pdf_size": 2609135, "gs_citation": 41, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2211105986513987829&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Microsoft, India; SAP Labs, India; Department of Computer Science, IIT Kanpur, India", "aff_domain": "cse.iitk.ac.in; ;cse.iitk.ac.in", "email": "cse.iitk.ac.in; ;cse.iitk.ac.in", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/kumar21a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Microsoft;SAP Labs;IIT Kanpur", "aff_unique_dep": "Microsoft Corporation;;Department of Computer Science", "aff_unique_url": "https://www.microsoft.com/en-in;https://labs.sap/;https://www.iitk.ac.in", "aff_unique_abbr": "Microsoft;SAP Labs;IITK", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "India" }, { "title": "Benchmarks, Algorithms, and Metrics for Hierarchical Disentanglement", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9179", "id": "9179", "proceeding": "http://proceedings.mlr.press/v139/ross21a.html", "slides": "", "author_site": "Andrew Ross, Finale Doshi-Velez", "author": "Andrew Ross; Finale Doshi-Velez", "abstract": "In representation learning, there has been recent interest in developing algorithms to disentangle the ground-truth generative factors behind a dataset, and metrics to quantify how fully this occurs. However, these algorithms and metrics often assume that both representations and ground-truth factors are flat, continuous, and factorized, whereas many real-world generative processes involve rich hierarchical structure, mixtures of discrete and continuous variables with dependence between them, and even varying intrinsic dimensionality. In this work, we develop benchmarks, algorithms, and metrics for learning such hierarchical representations.", "bibtex": "@InProceedings{pmlr-v139-ross21a,\n title = \t {Benchmarks, Algorithms, and Metrics for Hierarchical Disentanglement},\n author = {Ross, Andrew and Doshi-Velez, Finale},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9084--9094},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ross21a/ross21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ross21a.html},\n abstract = \t {In representation learning, there has been recent interest in developing algorithms to disentangle the ground-truth generative factors behind a dataset, and metrics to quantify how fully this occurs. However, these algorithms and metrics often assume that both representations and ground-truth factors are flat, continuous, and factorized, whereas many real-world generative processes involve rich hierarchical structure, mixtures of discrete and continuous variables with dependence between them, and even varying intrinsic dimensionality. 
In this work, we develop benchmarks, algorithms, and metrics for learning such hierarchical representations.}\n}", "pdf": "http://proceedings.mlr.press/v139/ross21a/ross21a.pdf", "supp": "", "pdf_size": 811311, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9234964175960458338&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Harvard University; Harvard University", "aff_domain": "g.harvard.edu; ", "email": "g.harvard.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/ross21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Harvard University", "aff_unique_dep": "", "aff_unique_url": "https://www.harvard.edu", "aff_unique_abbr": "Harvard", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Besov Function Approximation and Binary Classification on Low-Dimensional Manifolds Using Convolutional Residual Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9145", "id": "9145", "proceeding": "http://proceedings.mlr.press/v139/liu21e.html", "slides": "/media/icml-2021/Slides/9145.pdf", "author_site": "Hao Liu, Minshuo Chen, Tuo Zhao, Wenjing Liao", "author": "Hao Liu; Minshuo Chen; Tuo Zhao; Wenjing Liao", "abstract": "Most of existing statistical theories on deep neural networks have sample complexities cursed by the data dimension and therefore cannot well explain the empirical success of deep learning on high-dimensional data. To bridge this gap, we propose to exploit the low-dimensional structures of the real world datasets and establish theoretical guarantees of convolutional residual networks (ConvResNet) in terms of function approximation and statistical recovery for binary classification problem. Specifically, given the data lying on a $d$-dimensional manifold isometrically embedded in $\\mathbb{R}^D$, we prove that if the network architecture is properly chosen, ConvResNets can (1) approximate {\\it Besov functions} on manifolds with arbitrary accuracy, and (2) learn a classifier by minimizing the empirical logistic risk, which gives an {\\it excess risk} in the order of $n^{-\\frac{s}{2s+2(s\\vee d)}}$, where $s$ is a smoothness parameter. This implies that the sample complexity depends on the intrinsic dimension $d$, instead of the data dimension $D$. Our results demonstrate that ConvResNets are adaptive to low-dimensional structures of data sets.", "bibtex": "@InProceedings{pmlr-v139-liu21e,\n title = \t {Besov Function Approximation and Binary Classification on Low-Dimensional Manifolds Using Convolutional Residual Networks},\n author = {Liu, Hao and Chen, Minshuo and Zhao, Tuo and Liao, Wenjing},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6770--6780},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21e/liu21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21e.html},\n abstract = \t {Most of existing statistical theories on deep neural networks have sample complexities cursed by the data dimension and therefore cannot well explain the empirical success of deep learning on high-dimensional data. 
To bridge this gap, we propose to exploit the low-dimensional structures of the real world datasets and establish theoretical guarantees of convolutional residual networks (ConvResNet) in terms of function approximation and statistical recovery for binary classification problem. Specifically, given the data lying on a $d$-dimensional manifold isometrically embedded in $\\mathbb{R}^D$, we prove that if the network architecture is properly chosen, ConvResNets can (1) approximate {\\it Besov functions} on manifolds with arbitrary accuracy, and (2) learn a classifier by minimizing the empirical logistic risk, which gives an {\\it excess risk} in the order of $n^{-\\frac{s}{2s+2(s\\vee d)}}$, where $s$ is a smoothness parameter. This implies that the sample complexity depends on the intrinsic dimension $d$, instead of the data dimension $D$. Our results demonstrate that ConvResNets are adaptive to low-dimensional structures of data sets.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21e/liu21e.pdf", "supp": "", "pdf_size": 1867734, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11090309112996367629&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Mathematics, Hong Kong Baptist University; School of Industrial and Systems Engineering, Georgia Institute of Technology; School of Industrial and Systems Engineering, Georgia Institute of Technology; School of Mathematics, Georgia Institute of Technology", "aff_domain": "hkbu.edu.hk;gatech.edu;gatech.edu;gatech.edu", "email": "hkbu.edu.hk;gatech.edu;gatech.edu;gatech.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/liu21e.html", "aff_unique_index": "0;1;1;1", "aff_unique_norm": "Hong Kong Baptist University;Georgia Institute of Technology", "aff_unique_dep": "Department of Mathematics;School of Industrial and Systems Engineering", "aff_unique_url": "https://www.hkbu.edu.hk;https://www.gatech.edu", "aff_unique_abbr": "HKBU;Georgia Tech", "aff_campus_unique_index": "0;1;1;1", "aff_campus_unique": "Hong Kong SAR;Atlanta", "aff_country_unique_index": "0;1;1;1", "aff_country_unique": "China;United States" }, { "title": "Best Arm Identification in Graphical Bilinear Bandits", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10093", "id": "10093", "proceeding": "http://proceedings.mlr.press/v139/rizk21a.html", "slides": "", "author_site": "Geovani Rizk, Albert Thomas, Igor Colin, Rida Laraki, Yann Chevaleyre", "author": "Geovani Rizk; Albert Thomas; Igor Colin; Rida Laraki; Yann Chevaleyre", "abstract": "We introduce a new graphical bilinear bandit problem where a learner (or a \\emph{central entity}) allocates arms to the nodes of a graph and observes for each edge a noisy bilinear reward representing the interaction between the two end nodes. We study the best arm identification problem in which the learner wants to find the graph allocation maximizing the sum of the bilinear rewards. By efficiently exploiting the geometry of this bandit problem, we propose a \\emph{decentralized} allocation strategy based on random sampling with theoretical guarantees. In particular, we characterize the influence of the graph structure (e.g. 
star, complete or circle) on the convergence rate and propose empirical experiments that confirm this dependency.", "bibtex": "@InProceedings{pmlr-v139-rizk21a,\n title = \t {Best Arm Identification in Graphical Bilinear Bandits},\n author = {Rizk, Geovani and Thomas, Albert and Colin, Igor and Laraki, Rida and Chevaleyre, Yann},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9010--9019},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/rizk21a/rizk21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/rizk21a.html},\n abstract = \t {We introduce a new graphical bilinear bandit problem where a learner (or a \\emph{central entity}) allocates arms to the nodes of a graph and observes for each edge a noisy bilinear reward representing the interaction between the two end nodes. We study the best arm identification problem in which the learner wants to find the graph allocation maximizing the sum of the bilinear rewards. By efficiently exploiting the geometry of this bandit problem, we propose a \\emph{decentralized} allocation strategy based on random sampling with theoretical guarantees. In particular, we characterize the influence of the graph structure (e.g. star, complete or circle) on the convergence rate and propose empirical experiments that confirm this dependency.}\n}", "pdf": "http://proceedings.mlr.press/v139/rizk21a/rizk21a.pdf", "supp": "", "pdf_size": 1106767, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14120526064984866314&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "PSL - Universit\u00e9 Paris Dauphine, CNRS, LAMSADE, Paris, France+Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; PSL - Universit\u00e9 Paris Dauphine, CNRS, LAMSADE, Paris, France+Liverpool University; PSL - Universit\u00e9 Paris Dauphine, CNRS, LAMSADE, Paris, France", "aff_domain": "dauphine.psl.eu; ; ; ; ", "email": "dauphine.psl.eu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/rizk21a.html", "aff_unique_index": "0+1;1;1;0+2;0", "aff_unique_norm": "Universit\u00e9 Paris Dauphine;Huawei;University of Liverpool", "aff_unique_dep": "LAMSADE;Noah\u2019s Ark Lab;", "aff_unique_url": "https://www.universite-paris-dauphine.fr;https://www.huawei.com;https://www.liverpool.ac.uk", "aff_unique_abbr": "UPD;Huawei;Liv Uni", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Paris;", "aff_country_unique_index": "0+1;1;1;0+2;0", "aff_country_unique": "France;China;United Kingdom" }, { "title": "Best Model Identification: A Rested Bandit Formulation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9257", "id": "9257", "proceeding": "http://proceedings.mlr.press/v139/cella21a.html", "slides": "/media/icml-2021/Slides/9257.pdf", "author_site": "Leonardo Cella, Massimiliano Pontil, Claudio Gentile", "author": "Leonardo Cella; Massimiliano Pontil; Claudio Gentile", "abstract": "We introduce and analyze a best arm identification problem in the rested bandit setting, wherein arms are themselves learning algorithms whose expected losses decrease with the number of times the arm has been played. 
The shape of the expected loss functions is similar across arms, and is assumed to be available up to unknown parameters that have to be learned on the fly. We define a novel notion of regret for this problem, where we compare to the policy that always plays the arm having the smallest expected loss at the end of the game. We analyze an arm elimination algorithm whose regret vanishes as the time horizon increases. The actual rate of convergence depends in a detailed way on the postulated functional form of the expected losses. We complement our analysis with lower bounds, indicating strengths and limitations of the proposed solution.", "bibtex": "@InProceedings{pmlr-v139-cella21a,\n title = \t {Best Model Identification: A Rested Bandit Formulation},\n author = {Cella, Leonardo and Pontil, Massimiliano and Gentile, Claudio},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1362--1372},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cella21a/cella21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/cella21a.html},\n abstract = \t {We introduce and analyze a best arm identification problem in the rested bandit setting, wherein arms are themselves learning algorithms whose expected losses decrease with the number of times the arm has been played. The shape of the expected loss functions is similar across arms, and is assumed to be available up to unknown parameters that have to be learned on the fly. We define a novel notion of regret for this problem, where we compare to the policy that always plays the arm having the smallest expected loss at the end of the game. We analyze an arm elimination algorithm whose regret vanishes as the time horizon increases. The actual rate of convergence depends in a detailed way on the postulated functional form of the expected losses. 
We complement our analysis with lower bounds, indicating strengths and limitations of the proposed solution.}\n}", "pdf": "http://proceedings.mlr.press/v139/cella21a/cella21a.pdf", "supp": "", "pdf_size": 347488, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11169976872330361027&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Italian Institute of Technology, Genoa, Italy+University College London, United Kingdom; Italian Institute of Technology, Genoa, Italy+University College London, United Kingdom; Google Research, New York, USA", "aff_domain": "gmail.com; ; ", "email": "gmail.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/cella21a.html", "aff_unique_index": "0+1;0+1;2", "aff_unique_norm": "Italian Institute of Technology;University College London;Google", "aff_unique_dep": ";;Google Research", "aff_unique_url": "https://www.iit.it;https://www.ucl.ac.uk;https://research.google", "aff_unique_abbr": "IIT;UCL;Google Research", "aff_campus_unique_index": "0;0;2", "aff_campus_unique": "Genoa;;New York", "aff_country_unique_index": "0+1;0+1;2", "aff_country_unique": "Italy;United Kingdom;United States" }, { "title": "Better Training using Weight-Constrained Stochastic Dynamics", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10583", "id": "10583", "proceeding": "http://proceedings.mlr.press/v139/leimkuhler21a.html", "slides": "", "author_site": "Benedict Leimkuhler, Tiffany Vlaar, Timoth\u00e9e Pouchon, Amos Storkey", "author": "Benedict Leimkuhler; Tiffany J Vlaar; Timoth\u00e9e Pouchon; Amos Storkey", "abstract": "We employ constraints to control the parameter space of deep neural networks throughout training. The use of customised, appropriately designed constraints can reduce the vanishing/exploding gradients problem, improve smoothness of classification boundaries, control weight magnitudes and stabilize deep neural networks, and thus enhance the robustness of training algorithms and the generalization capabilities of neural networks. We provide a general approach to efficiently incorporate constraints into a stochastic gradient Langevin framework, allowing enhanced exploration of the loss landscape. We also present specific examples of constrained training methods motivated by orthogonality preservation for weight matrices and explicit weight normalizations. Discretization schemes are provided both for the overdamped formulation of Langevin dynamics and the underdamped form, in which momenta further improve sampling efficiency. 
These optimisation schemes can be used directly, without needing to adapt neural network architecture design choices or to modify the objective with regularization terms, and see performance improvements in classification tasks.", "bibtex": "@InProceedings{pmlr-v139-leimkuhler21a,\n title = \t {Better Training using Weight-Constrained Stochastic Dynamics},\n author = {Leimkuhler, Benedict and Vlaar, Tiffany J and Pouchon, Timoth{\\'e}e and Storkey, Amos},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6200--6211},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/leimkuhler21a/leimkuhler21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/leimkuhler21a.html},\n abstract = \t {We employ constraints to control the parameter space of deep neural networks throughout training. The use of customised, appropriately designed constraints can reduce the vanishing/exploding gradients problem, improve smoothness of classification boundaries, control weight magnitudes and stabilize deep neural networks, and thus enhance the robustness of training algorithms and the generalization capabilities of neural networks. We provide a general approach to efficiently incorporate constraints into a stochastic gradient Langevin framework, allowing enhanced exploration of the loss landscape. We also present specific examples of constrained training methods motivated by orthogonality preservation for weight matrices and explicit weight normalizations. Discretization schemes are provided both for the overdamped formulation of Langevin dynamics and the underdamped form, in which momenta further improve sampling efficiency. 
These optimisation schemes can be used directly, without needing to adapt neural network architecture design choices or to modify the objective with regularization terms, and see performance improvements in classification tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/leimkuhler21a/leimkuhler21a.pdf", "supp": "", "pdf_size": 7112873, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16942829728118781879&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Mathematics, University of Edinburgh, United Kingdom+Department of Informatics, University of Edinburgh, United Kingdom; Department of Mathematics, University of Edinburgh, United Kingdom+Department of Informatics, University of Edinburgh, United Kingdom; Department of Mathematics, University of Edinburgh, United Kingdom+Department of Informatics, University of Edinburgh, United Kingdom; Department of Informatics, University of Edinburgh, United Kingdom", "aff_domain": "ed.ac.uk; ; ; ", "email": "ed.ac.uk; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/leimkuhler21a.html", "aff_unique_index": "0+0;0+0;0+0;0", "aff_unique_norm": "University of Edinburgh", "aff_unique_dep": "Department of Mathematics", "aff_unique_url": "https://www.ed.ac.uk", "aff_unique_abbr": "Edinburgh", "aff_campus_unique_index": "1;1;1;1", "aff_campus_unique": ";Edinburgh", "aff_country_unique_index": "0+0;0+0;0+0;0", "aff_country_unique": "United Kingdom" }, { "title": "Beyond $log^2(T)$ regret for decentralized bandits in matching markets", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8743", "id": "8743", "proceeding": "http://proceedings.mlr.press/v139/basu21a.html", "slides": "", "author_site": "Soumya Basu, Karthik Abinav Sankararaman, Abishek Sankararaman", "author": "Soumya Basu; Karthik Abinav Sankararaman; Abishek Sankararaman", "abstract": "We design decentralized algorithms for regret minimization in the two sided matching market with one-sided bandit feedback that significantly improves upon the prior works (Liu et al.\\,2020a, Sankararaman et al.\\,2020, Liu et al.\\,2020b). First, for general markets, for any $\\varepsilon > 0$, we design an algorithm that achieves a $O(\\log^{1+\\varepsilon}(T))$ regret to the agent-optimal stable matching, with unknown time horizon $T$, improving upon the $O(\\log^{2}(T))$ regret achieved in (Liu et al.\\,2020b). Second, we provide the optimal $\\Theta(\\log(T))$ agent-optimal regret for markets satisfying {\\em uniqueness consistency} \u2013 markets where leaving participants don\u2019t alter the original stable matching. Previously, $\\Theta(\\log(T))$ regret was achievable (Sankararaman et al.\\,2020, Liu et al.\\,2020b) in the much restricted {\\em serial dictatorship} setting, when all arms have the same preference over the agents. We propose a phase based algorithm, where in each phase, besides deleting the globally communicated dominated arms the agents locally delete arms with which they collide often. This \\emph{local deletion} is pivotal in breaking deadlocks arising from rank heterogeneity of agents across arms. 
We further demonstrate superiority of our algorithm over existing works through simulations.", "bibtex": "@InProceedings{pmlr-v139-basu21a,\n title = \t {Beyond $log^2(T)$ regret for decentralized bandits in matching markets},\n author = {Basu, Soumya and Sankararaman, Karthik Abinav and Sankararaman, Abishek},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {705--715},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/basu21a/basu21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/basu21a.html},\n abstract = \t {We design decentralized algorithms for regret minimization in the two sided matching market with one-sided bandit feedback that significantly improves upon the prior works (Liu et al.\\,2020a, Sankararaman et al.\\,2020, Liu et al.\\,2020b). First, for general markets, for any $\\varepsilon > 0$, we design an algorithm that achieves a $O(\\log^{1+\\varepsilon}(T))$ regret to the agent-optimal stable matching, with unknown time horizon $T$, improving upon the $O(\\log^{2}(T))$ regret achieved in (Liu et al.\\,2020b). Second, we provide the optimal $\\Theta(\\log(T))$ agent-optimal regret for markets satisfying {\\em uniqueness consistency} \u2013 markets where leaving participants don\u2019t alter the original stable matching. Previously, $\\Theta(\\log(T))$ regret was achievable (Sankararaman et al.\\,2020, Liu et al.\\,2020b) in the much restricted {\\em serial dictatorship} setting, when all arms have the same preference over the agents. We propose a phase based algorithm, where in each phase, besides deleting the globally communicated dominated arms the agents locally delete arms with which they collide often. This \\emph{local deletion} is pivotal in breaking deadlocks arising from rank heterogeneity of agents across arms. We further demonstrate superiority of our algorithm over existing works through simulations.}\n}", "pdf": "http://proceedings.mlr.press/v139/basu21a/basu21a.pdf", "supp": "", "pdf_size": 6335223, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2821581784542193910&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Google, Mountain View, CA, USA; Facebook, Menlo Park, CA, USA; Amazon, Palo Alto, CA, USA", "aff_domain": "utexas.edu;gmail.com;gmail.com", "email": "utexas.edu;gmail.com;gmail.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/basu21a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Google;Meta;Amazon", "aff_unique_dep": "Google;Facebook;Amazon", "aff_unique_url": "https://www.google.com;https://www.facebook.com;https://www.amazon.com", "aff_unique_abbr": "Google;FB;Amazon", "aff_campus_unique_index": "0;1;2", "aff_campus_unique": "Mountain View;Menlo Park;Palo Alto", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Beyond Variance Reduction: Understanding the True Impact of Baselines on Policy Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9757", "id": "9757", "proceeding": "http://proceedings.mlr.press/v139/chung21a.html", "slides": "", "author_site": "Wesley Chung, Valentin Thomas, Marlos C. Machado, Nicolas Le Roux", "author": "Wesley Chung; Valentin Thomas; Marlos C. 
Machado; Nicolas Le Roux", "abstract": "Bandit and reinforcement learning (RL) problems can often be framed as optimization problems where the goal is to maximize average performance while having access only to stochastic estimates of the true gradient. Traditionally, stochastic optimization theory predicts that learning dynamics are governed by the curvature of the loss function and the noise of the gradient estimates. In this paper we demonstrate that the standard view is too limited for bandit and RL problems. To allow our analysis to be interpreted in light of multi-step MDPs, we focus on techniques derived from stochastic optimization principles\u00a0(e.g., natural policy gradient and EXP3) and we show that some standard assumptions from optimization theory are violated in these problems. We present theoretical results showing that, at least for bandit problems, curvature and noise are not sufficient to explain the learning dynamics and that seemingly innocuous choices like the baseline can determine whether an algorithm converges. These theoretical findings match our empirical evaluation, which we extend to multi-state MDPs.", "bibtex": "@InProceedings{pmlr-v139-chung21a,\n title = \t {Beyond Variance Reduction: Understanding the True Impact of Baselines on Policy Optimization},\n author = {Chung, Wesley and Thomas, Valentin and Machado, Marlos C. and Roux, Nicolas Le},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1999--2009},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chung21a/chung21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/chung21a.html},\n abstract = \t {Bandit and reinforcement learning (RL) problems can often be framed as optimization problems where the goal is to maximize average performance while having access only to stochastic estimates of the true gradient. Traditionally, stochastic optimization theory predicts that learning dynamics are governed by the curvature of the loss function and the noise of the gradient estimates. In this paper we demonstrate that the standard view is too limited for bandit and RL problems. To allow our analysis to be interpreted in light of multi-step MDPs, we focus on techniques derived from stochastic optimization principles\u00a0(e.g., natural policy gradient and EXP3) and we show that some standard assumptions from optimization theory are violated in these problems. We present theoretical results showing that, at least for bandit problems, curvature and noise are not sufficient to explain the learning dynamics and that seemingly innocuous choices like the baseline can determine whether an algorithm converges. 
These theoretical findings match our empirical evaluation, which we extend to multi-state MDPs.}\n}", "pdf": "http://proceedings.mlr.press/v139/chung21a/chung21a.pdf", "supp": "", "pdf_size": 3868367, "gs_citation": 35, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12775611299590119505&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Mila, McGill University + Google Research, Brain Team; Mila, University of Montreal + Google Research, Brain Team; DeepMind + Amii, University of Alberta + Google Research; Google Research, Brain Team", "aff_domain": "gmail.com;gmail.com;deepmind.com;leroux.name", "email": "gmail.com;gmail.com;deepmind.com;leroux.name", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/chung21a.html", "aff_unique_index": "0+1;2+1;3+4+1;1", "aff_unique_norm": "McGill University;Google;University of Montreal;DeepMind;University of Alberta", "aff_unique_dep": "Mila;Google Research;Mila;;Amii", "aff_unique_url": "https://www.mcgill.ca;https://research.google;https://www.mila.quebec;https://deepmind.com;https://www.ualberta.ca", "aff_unique_abbr": "McGill;Google;Mila;DeepMind;UAlberta", "aff_campus_unique_index": "0+1;0+1;1;1", "aff_campus_unique": "Montreal;Mountain View;", "aff_country_unique_index": "0+1;0+1;2+0+1;1", "aff_country_unique": "Canada;United States;United Kingdom" }, { "title": "Beyond the Pareto Efficient Frontier: Constraint Active Search for Multiobjective Experimental Design", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8409", "id": "8409", "proceeding": "http://proceedings.mlr.press/v139/malkomes21a.html", "slides": "/media/icml-2021/Slides/8409_Putv81d.pdf", "author_site": "Gustavo Malkomes, Bolong Cheng, Eric Lee, Michael McCourt", "author": "Gustavo Malkomes; Bolong Cheng; Eric H Lee; Mike Mccourt", "abstract": "Many problems in engineering design and simulation require balancing competing objectives under the presence of uncertainty. Sample-efficient multiobjective optimization methods focus on the objective function values in metric space and ignore the sampling behavior of the design configurations in parameter space. Consequently, they may provide little actionable insight on how to choose designs in the presence of metric uncertainty or limited precision when implementing a chosen design. We propose a new formulation that accounts for the importance of the parameter space and is thus more suitable for multiobjective design problems; instead of searching for the Pareto-efficient frontier, we solicit the desired minimum performance thresholds on all objectives to define regions of satisfaction. We introduce an active search algorithm called Expected Coverage Improvement (ECI) to efficiently discover the region of satisfaction and simultaneously sample diverse acceptable configurations. 
We demonstrate our algorithm on several design and simulation domains: mechanical design, additive manufacturing, medical monitoring, and plasma physics.", "bibtex": "@InProceedings{pmlr-v139-malkomes21a,\n title = \t {Beyond the Pareto Efficient Frontier: Constraint Active Search for Multiobjective Experimental Design},\n author = {Malkomes, Gustavo and Cheng, Bolong and Lee, Eric H and Mccourt, Mike},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7423--7434},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/malkomes21a/malkomes21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/malkomes21a.html},\n abstract = \t {Many problems in engineering design and simulation require balancing competing objectives under the presence of uncertainty. Sample-efficient multiobjective optimization methods focus on the objective function values in metric space and ignore the sampling behavior of the design configurations in parameter space. Consequently, they may provide little actionable insight on how to choose designs in the presence of metric uncertainty or limited precision when implementing a chosen design. We propose a new formulation that accounts for the importance of the parameter space and is thus more suitable for multiobjective design problems; instead of searching for the Pareto-efficient frontier, we solicit the desired minimum performance thresholds on all objectives to define regions of satisfaction. We introduce an active search algorithm called Expected Coverage Improvement (ECI) to efficiently discover the region of satisfaction and simultaneously sample diverse acceptable configurations. 
We demonstrate our algorithm on several design and simulation domains: mechanical design, additive manufacturing, medical monitoring, and plasma physics.}\n}", "pdf": "http://proceedings.mlr.press/v139/malkomes21a/malkomes21a.pdf", "supp": "", "pdf_size": 2833191, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17436141080235339176&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "SigOpt, an Intel company, San Francisco, CA, USA; SigOpt, an Intel company, San Francisco, CA, USA; SigOpt, an Intel company, San Francisco, CA, USA; SigOpt, an Intel company, San Francisco, CA, USA", "aff_domain": "intel.com; ; ; ", "email": "intel.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/malkomes21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "SigOpt", "aff_unique_dep": "", "aff_unique_url": "https://www.sigopt.com", "aff_unique_abbr": "", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Bias-Free Scalable Gaussian Processes via Randomized Truncations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10577", "id": "10577", "proceeding": "http://proceedings.mlr.press/v139/potapczynski21a.html", "slides": "/media/icml-2021/Slides/10577.pdf", "author_site": "Andres Potapczynski, Luhuan Wu, Dan Biderman, Geoff Pleiss, John Cunningham", "author": "Andres Potapczynski; Luhuan Wu; Dan Biderman; Geoff Pleiss; John P Cunningham", "abstract": "Scalable Gaussian Process methods are computationally attractive, yet introduce modeling biases that require rigorous study. This paper analyzes two common techniques: early truncated conjugate gradients (CG) and random Fourier features (RFF). We find that both methods introduce a systematic bias on the learned hyperparameters: CG tends to underfit while RFF tends to overfit. We address these issues using randomized truncation estimators that eliminate bias in exchange for increased variance. In the case of RFF, we show that the bias-to-variance conversion is indeed a trade-off: the additional variance proves detrimental to optimization. However, in the case of CG, our unbiased learning procedure meaningfully outperforms its biased counterpart with minimal additional computation. Our code is available at https://github.com/cunningham-lab/RTGPS.", "bibtex": "@InProceedings{pmlr-v139-potapczynski21a,\n title = \t {Bias-Free Scalable Gaussian Processes via Randomized Truncations},\n author = {Potapczynski, Andres and Wu, Luhuan and Biderman, Dan and Pleiss, Geoff and Cunningham, John P},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8609--8619},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/potapczynski21a/potapczynski21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/potapczynski21a.html},\n abstract = \t {Scalable Gaussian Process methods are computationally attractive, yet introduce modeling biases that require rigorous study. This paper analyzes two common techniques: early truncated conjugate gradients (CG) and random Fourier features (RFF). We find that both methods introduce a systematic bias on the learned hyperparameters: CG tends to underfit while RFF tends to overfit. 
We address these issues using randomized truncation estimators that eliminate bias in exchange for increased variance. In the case of RFF, we show that the bias-to-variance conversion is indeed a trade-off: the additional variance proves detrimental to optimization. However, in the case of CG, our unbiased learning procedure meaningfully outperforms its biased counterpart with minimal additional computation. Our code is available at https://github.com/cunningham-lab/RTGPS.}\n}", "pdf": "http://proceedings.mlr.press/v139/potapczynski21a/potapczynski21a.pdf", "supp": "", "pdf_size": 1107830, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5236118263143002712&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Zuckerman Institute, Columbia University; Statistics Department, Columbia University; Zuckerman Institute, Columbia University; Zuckerman Institute, Columbia University; Zuckerman Institute, Columbia University + Statistics Department, Columbia University", "aff_domain": "columbia.edu;columbia.edu;columbia.edu; ; ", "email": "columbia.edu;columbia.edu;columbia.edu; ; ", "github": "https://github.com/cunningham-lab/RTGPS", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/potapczynski21a.html", "aff_unique_index": "0;0;0;0;0+0", "aff_unique_norm": "Columbia University", "aff_unique_dep": "Zuckerman Institute", "aff_unique_url": "https://www.columbia.edu", "aff_unique_abbr": "Columbia", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0+0", "aff_country_unique": "United States" }, { "title": "Bias-Robust Bayesian Optimization via Dueling Bandits", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8493", "id": "8493", "proceeding": "http://proceedings.mlr.press/v139/kirschner21a.html", "slides": "", "author_site": "Johannes Kirschner, Andreas Krause", "author": "Johannes Kirschner; Andreas Krause", "abstract": "We consider Bayesian optimization in settings where observations can be adversarially biased, for example by an uncontrolled hidden confounder. Our first contribution is a reduction of the confounded setting to the dueling bandit model. Then we propose a novel approach for dueling bandits based on information-directed sampling (IDS). Thereby, we obtain the first efficient kernelized algorithm for dueling bandits that comes with cumulative regret guarantees. Our analysis further generalizes a previously proposed semi-parametric linear bandit model to non-linear reward functions, and uncovers interesting links to doubly-robust estimation.", "bibtex": "@InProceedings{pmlr-v139-kirschner21a,\n title = \t {Bias-Robust Bayesian Optimization via Dueling Bandits},\n author = {Kirschner, Johannes and Krause, Andreas},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5595--5605},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kirschner21a/kirschner21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kirschner21a.html},\n abstract = \t {We consider Bayesian optimization in settings where observations can be adversarially biased, for example by an uncontrolled hidden confounder. Our first contribution is a reduction of the confounded setting to the dueling bandit model. 
Then we propose a novel approach for dueling bandits based on information-directed sampling (IDS). Thereby, we obtain the first efficient kernelized algorithm for dueling bandits that comes with cumulative regret guarantees. Our analysis further generalizes a previously proposed semi-parametric linear bandit model to non-linear reward functions, and uncovers interesting links to doubly-robust estimation.}\n}", "pdf": "http://proceedings.mlr.press/v139/kirschner21a/kirschner21a.pdf", "supp": "", "pdf_size": 3731647, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18349555817155605966&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, ETH Zurich; Department of Computer Science, ETH Zurich", "aff_domain": "inf.ethz.ch; ", "email": "inf.ethz.ch; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/kirschner21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETHZ", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Switzerland" }, { "title": "Bias-Variance Reduced Local SGD for Less Heterogeneous Federated Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9507", "id": "9507", "proceeding": "http://proceedings.mlr.press/v139/murata21a.html", "slides": "", "author_site": "Tomoya Murata, Taiji Suzuki", "author": "Tomoya Murata; Taiji Suzuki", "abstract": "Recently, local SGD has got much attention and been extensively studied in the distributed learning community to overcome the communication bottleneck problem. However, the superiority of local SGD to minibatch SGD only holds in quite limited situations. In this paper, we study a new local algorithm called Bias-Variance Reduced Local SGD (BVR-L-SGD) for nonconvex distributed optimization. Algorithmically, our proposed bias and variance reduced local gradient estimator fully utilizes small second-order heterogeneity of local objectives and suggests randomly picking up one of the local models instead of taking the average of them when workers are synchronized. Theoretically, under small heterogeneity of local objectives, we show that BVR-L-SGD achieves better communication complexity than both the previous non-local and local methods under mild conditions, and particularly BVR-L-SGD is the first method that breaks the barrier of communication complexity $\\Theta(1/\\varepsilon)$ for general nonconvex smooth objectives when the heterogeneity is small and the local computation budget is large. 
Numerical results are given to verify the theoretical findings and give empirical evidence of the superiority of our method.", "bibtex": "@InProceedings{pmlr-v139-murata21a,\n title = \t {Bias-Variance Reduced Local SGD for Less Heterogeneous Federated Learning},\n author = {Murata, Tomoya and Suzuki, Taiji},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7872--7881},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/murata21a/murata21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/murata21a.html},\n abstract = \t {Recently, local SGD has got much attention and been extensively studied in the distributed learning community to overcome the communication bottleneck problem. However, the superiority of local SGD to minibatch SGD only holds in quite limited situations. In this paper, we study a new local algorithm called Bias-Variance Reduced Local SGD (BVR-L-SGD) for nonconvex distributed optimization. Algorithmically, our proposed bias and variance reduced local gradient estimator fully utilizes small second-order heterogeneity of local objectives and suggests randomly picking up one of the local models instead of taking the average of them when workers are synchronized. Theoretically, under small heterogeneity of local objectives, we show that BVR-L-SGD achieves better communication complexity than both the previous non-local and local methods under mild conditions, and particularly BVR-L-SGD is the first method that breaks the barrier of communication complexity $\\Theta(1/\\varepsilon)$ for general nonconvex smooth objectives when the heterogeneity is small and the local computation budget is large. 
Numerical results are given to verify the theoretical findings and give empirical evidence of the superiority of our method.}\n}", "pdf": "http://proceedings.mlr.press/v139/murata21a/murata21a.pdf", "supp": "", "pdf_size": 8898850, "gs_citation": 61, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8236622375432590237&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "NTT DATA Mathematical Systems Inc., Tokyo, Japan + Graduate School of Information Science and Technology, The University of Tokyo, Tokyo, Japan; Graduate School of Information Science and Technology, The University of Tokyo, Tokyo, Japan + Center for Advanced Intelligence Project, RIKEN, Tokyo, Japan", "aff_domain": "msi.co.jp;mist.i.u-tokyo.ac.jp", "email": "msi.co.jp;mist.i.u-tokyo.ac.jp", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/murata21a.html", "aff_unique_index": "0+1;1+2", "aff_unique_norm": "NTT DATA Mathematical Systems Inc.;University of Tokyo;RIKEN", "aff_unique_dep": ";Graduate School of Information Science and Technology;Center for Advanced Intelligence Project", "aff_unique_url": "https://www.ntt-data.com/;https://www.u-tokyo.ac.jp;https://www.riken.jp", "aff_unique_abbr": ";UTokyo;RIKEN", "aff_campus_unique_index": "1;1+1", "aff_campus_unique": ";Tokyo", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "Japan" }, { "title": "Bilevel Optimization: Convergence Analysis and Enhanced Design", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9473", "id": "9473", "proceeding": "http://proceedings.mlr.press/v139/ji21c.html", "slides": "/media/icml-2021/Slides/9473.pdf", "author_site": "Kaiyi Ji, Junjie Yang, Yingbin LIANG", "author": "Kaiyi Ji; Junjie Yang; Yingbin Liang", "abstract": "Bilevel optimization has arisen as a powerful tool for many machine learning problems such as meta-learning, hyperparameter optimization, and reinforcement learning. In this paper, we investigate the nonconvex-strongly-convex bilevel optimization problem. For deterministic bilevel optimization, we provide a comprehensive convergence rate analysis for two popular algorithms respectively based on approximate implicit differentiation (AID) and iterative differentiation (ITD). For the AID-based method, we orderwisely improve the previous convergence rate analysis due to a more practical parameter selection as well as a warm start strategy, and for the ITD-based method we establish the first theoretical convergence rate. Our analysis also provides a quantitative comparison between ITD and AID based approaches. For stochastic bilevel optimization, we propose a novel algorithm named stocBiO, which features a sample-efficient hypergradient estimator using efficient Jacobian- and Hessian-vector product computations. We provide the convergence rate guarantee for stocBiO, and show that stocBiO outperforms the best known computational complexities orderwisely with respect to the condition number $\\kappa$ and the target accuracy $\\epsilon$. 
We further validate our theoretical results and demonstrate the efficiency of bilevel optimization algorithms by the experiments on meta-learning and hyperparameter optimization.", "bibtex": "@InProceedings{pmlr-v139-ji21c,\n title = \t {Bilevel Optimization: Convergence Analysis and Enhanced Design},\n author = {Ji, Kaiyi and Yang, Junjie and Liang, Yingbin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4882--4892},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ji21c/ji21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/ji21c.html},\n abstract = \t {Bilevel optimization has arisen as a powerful tool for many machine learning problems such as meta-learning, hyperparameter optimization, and reinforcement learning. In this paper, we investigate the nonconvex-strongly-convex bilevel optimization problem. For deterministic bilevel optimization, we provide a comprehensive convergence rate analysis for two popular algorithms respectively based on approximate implicit differentiation (AID) and iterative differentiation (ITD). For the AID-based method, we orderwisely improve the previous convergence rate analysis due to a more practical parameter selection as well as a warm start strategy, and for the ITD-based method we establish the first theoretical convergence rate. Our analysis also provides a quantitative comparison between ITD and AID based approaches. For stochastic bilevel optimization, we propose a novel algorithm named stocBiO, which features a sample-efficient hypergradient estimator using efficient Jacobian- and Hessian-vector product computations. We provide the convergence rate guarantee for stocBiO, and show that stocBiO outperforms the best known computational complexities orderwisely with respect to the condition number $\\kappa$ and the target accuracy $\\epsilon$. 
We further validate our theoretical results and demonstrate the efficiency of bilevel optimization algorithms by the experiments on meta-learning and hyperparameter optimization.}\n}", "pdf": "http://proceedings.mlr.press/v139/ji21c/ji21c.pdf", "supp": "", "pdf_size": 6403436, "gs_citation": 299, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14240180646297063660&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Electrical and Computer Engineering, The Ohio State University; Department of Electrical and Computer Engineering, The Ohio State University; Department of Electrical and Computer Engineering, The Ohio State University", "aff_domain": "osu.edu; ; ", "email": "osu.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/ji21c.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Ohio State University", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.osu.edu", "aff_unique_abbr": "OSU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Bilinear Classes: A Structural Framework for Provable Generalization in RL", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9125", "id": "9125", "proceeding": "http://proceedings.mlr.press/v139/du21a.html", "slides": "/media/icml-2021/Slides/9125.pdf", "author_site": "Simon Du, Sham Kakade, Jason Lee, Shachar Lovett, Gaurav Mahajan, Wen Sun, Ruosong Wang", "author": "Simon Du; Sham Kakade; Jason Lee; Shachar Lovett; Gaurav Mahajan; Wen Sun; Ruosong Wang", "abstract": "This work introduces Bilinear Classes, a new structural framework, which permit generalization in reinforcement learning in a wide variety of settings through the use of function approximation. The framework incorporates nearly all existing models in which a polynomial sample complexity is achievable, and, notably, also includes new models, such as the Linear Q*/V* model in which both the optimal Q-function and the optimal V-function are linear in some known feature space. Our main result provides an RL algorithm which has polynomial sample complexity for Bilinear Classes; notably, this sample complexity is stated in terms of a reduction to the generalization error of an underlying supervised learning sub-problem. These bounds nearly match the best known sample complexity bounds for existing models. 
Furthermore, this framework also extends to the infinite dimensional (RKHS) setting: for the Linear Q*/V* model, linear MDPs, and linear mixture MDPs, we provide sample complexities that have no explicit dependence on the explicit feature dimension (which could be infinite), but instead depends only on information theoretic quantities.", "bibtex": "@InProceedings{pmlr-v139-du21a,\n title = \t {Bilinear Classes: A Structural Framework for Provable Generalization in RL},\n author = {Du, Simon and Kakade, Sham and Lee, Jason and Lovett, Shachar and Mahajan, Gaurav and Sun, Wen and Wang, Ruosong},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2826--2836},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/du21a/du21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/du21a.html},\n abstract = \t {This work introduces Bilinear Classes, a new structural framework, which permit generalization in reinforcement learning in a wide variety of settings through the use of function approximation. The framework incorporates nearly all existing models in which a polynomial sample complexity is achievable, and, notably, also includes new models, such as the Linear Q*/V* model in which both the optimal Q-function and the optimal V-function are linear in some known feature space. Our main result provides an RL algorithm which has polynomial sample complexity for Bilinear Classes; notably, this sample complexity is stated in terms of a reduction to the generalization error of an underlying supervised learning sub-problem. These bounds nearly match the best known sample complexity bounds for existing models. 
Furthermore, this framework also extends to the infinite dimensional (RKHS) setting: for the Linear Q*/V* model, linear MDPs, and linear mixture MDPs, we provide sample complexities that have no explicit dependence on the explicit feature dimension (which could be infinite), but instead depends only on information theoretic quantities.}\n}", "pdf": "http://proceedings.mlr.press/v139/du21a/du21a.pdf", "supp": "", "pdf_size": 389175, "gs_citation": 256, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4739348112793122814&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "University of Washington; Princeton University; University of California, San Diego; University of California, San Diego; University of California, San Diego; Cornell University; Carnegie Mellon University", "aff_domain": "uw.edu;princeton.edu;princeton.edu;ucsd.edu;eng.ucsd.edu;cornell.edu;andrew.cmu.edu", "email": "uw.edu;princeton.edu;princeton.edu;ucsd.edu;eng.ucsd.edu;cornell.edu;andrew.cmu.edu", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/du21a.html", "aff_unique_index": "0;1;2;2;2;3;4", "aff_unique_norm": "University of Washington;Princeton University;University of California, San Diego;Cornell University;Carnegie Mellon University", "aff_unique_dep": ";;;;", "aff_unique_url": "https://www.washington.edu;https://www.princeton.edu;https://www.ucsd.edu;https://www.cornell.edu;https://www.cmu.edu", "aff_unique_abbr": "UW;Princeton;UCSD;Cornell;CMU", "aff_campus_unique_index": "1;1;1", "aff_campus_unique": ";San Diego", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Binary Classification from Multiple Unlabeled Datasets via Surrogate Set Classification", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9865", "id": "9865", "proceeding": "http://proceedings.mlr.press/v139/lu21c.html", "slides": "", "author_site": "Nan Lu, Shida Lei, Gang Niu, Issei Sato, Masashi Sugiyama", "author": "Nan Lu; Shida Lei; Gang Niu; Issei Sato; Masashi Sugiyama", "abstract": "To cope with high annotation costs, training a classifier only from weakly supervised data has attracted a great deal of attention these days. Among various approaches, strengthening supervision from completely unsupervised classification is a promising direction, which typically employs class priors as the only supervision and trains a binary classifier from unlabeled (U) datasets. While existing risk-consistent methods are theoretically grounded with high flexibility, they can learn only from two U sets. In this paper, we propose a new approach for binary classification from $m$ U-sets for $m\ge2$. Our key idea is to consider an auxiliary classification task called surrogate set classification (SSC), which is aimed at predicting from which U set each observed sample is drawn. SSC can be solved by a standard (multi-class) classification method, and we use the SSC solution to obtain the final binary classifier through a certain linear-fractional transformation. We built our method in a flexible and efficient end-to-end deep learning framework and prove it to be classifier-consistent. 
Through experiments, we demonstrate the superiority of our proposed method over state-of-the-art methods.", "bibtex": "@InProceedings{pmlr-v139-lu21c,\n title = \t {Binary Classification from Multiple Unlabeled Datasets via Surrogate Set Classification},\n author = {Lu, Nan and Lei, Shida and Niu, Gang and Sato, Issei and Sugiyama, Masashi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7134--7144},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lu21c/lu21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/lu21c.html},\n abstract = \t {To cope with high annotation costs, training a classifier only from weakly supervised data has attracted a great deal of attention these days. Among various approaches, strengthening supervision from completely unsupervised classification is a promising direction, which typically employs class priors as the only supervision and trains a binary classifier from unlabeled (U) datasets. While existing risk-consistent methods are theoretically grounded with high flexibility, they can learn only from two U sets. In this paper, we propose a new approach for binary classification from $m$ U-sets for $m\\ge2$. Our key idea is to consider an auxiliary classification task called surrogate set classification (SSC), which is aimed at predicting from which U set each observed sample is drawn. SSC can be solved by a standard (multi-class) classification method, and we use the SSC solution to obtain the final binary classifier through a certain linear-fractional transformation. We built our method in a flexible and efficient end-to-end deep learning framework and prove it to be classifier-consistent. Through experiments, we demonstrate the superiority of our proposed method over state-of-the-art methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/lu21c/lu21c.pdf", "supp": "", "pdf_size": 2537567, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8249584082478727878&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "The University of Tokyo; The University of Tokyo; RIKEN; The University of Tokyo + RIKEN; RIKEN", "aff_domain": "ms.k.u-tokyo.ac.jp;is.s.u-tokyo.ac.jp; ; ; ", "email": "ms.k.u-tokyo.ac.jp;is.s.u-tokyo.ac.jp; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/lu21c.html", "aff_unique_index": "0;0;1;0+1;1", "aff_unique_norm": "University of Tokyo;RIKEN", "aff_unique_dep": ";", "aff_unique_url": "https://www.u-tokyo.ac.jp;https://www.riken.jp", "aff_unique_abbr": "UTokyo;RIKEN", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0+0;0", "aff_country_unique": "Japan" }, { "title": "Black-box density function estimation using recursive partitioning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9471", "id": "9471", "proceeding": "http://proceedings.mlr.press/v139/bodin21a.html", "slides": "", "author_site": "Erik Bodin, Zhenwen Dai, Neill Campbell, Carl Henrik Ek", "author": "Erik Bodin; Zhenwen Dai; Neill Campbell; Carl Henrik Ek", "abstract": "We present a novel approach to Bayesian inference and general Bayesian computation that is defined through a sequential decision loop. Our method defines a recursive partitioning of the sample space. 
It neither relies on gradients nor requires any problem-specific tuning, and is asymptotically exact for any density function with a bounded domain. The output is an approximation to the whole density function including the normalisation constant, via partitions organised in efficient data structures. Such approximations may be used for evidence estimation or fast posterior sampling, but also as building blocks to treat a larger class of estimation problems. The algorithm shows competitive performance to recent state-of-the-art methods on synthetic and real-world problems including parameter inference for gravitational-wave physics.", "bibtex": "@InProceedings{pmlr-v139-bodin21a,\n title = \t {Black-box density function estimation using recursive partitioning},\n author = {Bodin, Erik and Dai, Zhenwen and Campbell, Neill and Ek, Carl Henrik},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1015--1025},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bodin21a/bodin21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bodin21a.html},\n abstract = \t {We present a novel approach to Bayesian inference and general Bayesian computation that is defined through a sequential decision loop. Our method defines a recursive partitioning of the sample space. It neither relies on gradients nor requires any problem-specific tuning, and is asymptotically exact for any density function with a bounded domain. The output is an approximation to the whole density function including the normalisation constant, via partitions organised in efficient data structures. Such approximations may be used for evidence estimation or fast posterior sampling, but also as building blocks to treat a larger class of estimation problems. 
The algorithm shows competitive performance to recent state-of-the-art methods on synthetic and real-world problems including parameter inference for gravitational-wave physics.}\n}", "pdf": "http://proceedings.mlr.press/v139/bodin21a/bodin21a.pdf", "supp": "", "pdf_size": 2650636, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17001427494872038467&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "University of Bristol, United Kingdom; Spotify, United Kingdom; University of Bath, United Kingdom; University of Cambridge, United Kingdom", "aff_domain": "erikbodin.com; ; ; ", "email": "erikbodin.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/bodin21a.html", "aff_unique_index": "0;1;2;3", "aff_unique_norm": "University of Bristol;Spotify;University of Bath;University of Cambridge", "aff_unique_dep": ";;;", "aff_unique_url": "https://www.bristol.ac.uk;https://www.spotify.com;https://www.bath.ac.uk;https://www.cam.ac.uk", "aff_unique_abbr": "Bristol;Spotify;Bath;Cambridge", "aff_campus_unique_index": "1", "aff_campus_unique": ";Cambridge", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Blind Pareto Fairness and Subgroup Robustness", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9557", "id": "9557", "proceeding": "http://proceedings.mlr.press/v139/martinez21a.html", "slides": "", "author_site": "Natalia Martinez Gil, Martin Bertran, Afroditi Papadaki, Miguel Rodrigues, Guillermo Sapiro", "author": "Natalia L Martinez; Martin A Bertran; Afroditi Papadaki; Miguel Rodrigues; Guillermo Sapiro", "abstract": "Much of the work in the field of group fairness addresses disparities between predefined groups based on protected features such as gender, age, and race, which need to be available at train, and often also at test, time. These approaches are static and retrospective, since algorithms designed to protect groups identified a priori cannot anticipate and protect the needs of different at-risk groups in the future. In this work we analyze the space of solutions for worst-case fairness beyond demographics, and propose Blind Pareto Fairness (BPF), a method that leverages no-regret dynamics to recover a fair minimax classifier that reduces worst-case risk of any potential subgroup of sufficient size, and guarantees that the remaining population receives the best possible level of service. BPF addresses fairness beyond demographics, that is, it does not rely on predefined notions of at-risk groups, neither at train nor at test time. Our experimental results show that the proposed framework improves worst-case risk in multiple standard datasets, while simultaneously providing better levels of service for the remaining population. 
The code is available at github.com/natalialmg/BlindParetoFairness", "bibtex": "@InProceedings{pmlr-v139-martinez21a,\n title = \t {Blind Pareto Fairness and Subgroup Robustness},\n author = {Martinez, Natalia L and Bertran, Martin A and Papadaki, Afroditi and Rodrigues, Miguel and Sapiro, Guillermo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7492--7501},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/martinez21a/martinez21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/martinez21a.html},\n abstract = \t {Much of the work in the field of group fairness addresses disparities between predefined groups based on protected features such as gender, age, and race, which need to be available at train, and often also at test, time. These approaches are static and retrospective, since algorithms designed to protect groups identified a priori cannot anticipate and protect the needs of different at-risk groups in the future. In this work we analyze the space of solutions for worst-case fairness beyond demographics, and propose Blind Pareto Fairness (BPF), a method that leverages no-regret dynamics to recover a fair minimax classifier that reduces worst-case risk of any potential subgroup of sufficient size, and guarantees that the remaining population receives the best possible level of service. BPF addresses fairness beyond demographics, that is, it does not rely on predefined notions of at-risk groups, neither at train nor at test time. Our experimental results show that the proposed framework improves worst-case risk in multiple standard datasets, while simultaneously providing better levels of service for the remaining population. The code is available at github.com/natalialmg/BlindParetoFairness}\n}", "pdf": "http://proceedings.mlr.press/v139/martinez21a/martinez21a.pdf", "supp": "", "pdf_size": 6990660, "gs_citation": 50, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11688522208362271332&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Duke University; Duke University; University College London; University College London; Duke University", "aff_domain": "duke.edu;duke.edu; ; ; ", "email": "duke.edu;duke.edu; ; ; ", "github": "github.com/natalialmg/BlindParetoFairness", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/martinez21a.html", "aff_unique_index": "0;0;1;1;0", "aff_unique_norm": "Duke University;University College London", "aff_unique_dep": ";", "aff_unique_url": "https://www.duke.edu;https://www.ucl.ac.uk", "aff_unique_abbr": "Duke;UCL", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;1;1;0", "aff_country_unique": "United States;United Kingdom" }, { "title": "Boosting for Online Convex Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10377", "id": "10377", "proceeding": "http://proceedings.mlr.press/v139/hazan21a.html", "slides": "/media/icml-2021/Slides/10377.pdf", "author_site": "Elad Hazan, Karan Singh", "author": "Elad Hazan; Karan Singh", "abstract": "We consider the decision-making framework of online convex optimization with a very large number of experts. 
This setting is ubiquitous in contextual and reinforcement learning problems, where the size of the policy class renders enumeration and search within the policy class infeasible. Instead, we consider generalizing the methodology of online boosting. We define a weak learning algorithm as a mechanism that guarantees multiplicatively approximate regret against a base class of experts. In this access model, we give an efficient boosting algorithm that guarantees near-optimal regret against the convex hull of the base class. We consider both full and partial (a.k.a. bandit) information feedback models. We also give an analogous efficient boosting algorithm for the i.i.d. statistical setting. Our results simultaneously generalize online boosting and gradient boosting guarantees to contextual learning model, online convex optimization and bandit linear optimization settings.", "bibtex": "@InProceedings{pmlr-v139-hazan21a,\n title = \t {Boosting for Online Convex Optimization},\n author = {Hazan, Elad and Singh, Karan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4140--4149},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hazan21a/hazan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hazan21a.html},\n abstract = \t {We consider the decision-making framework of online convex optimization with a very large number of experts. This setting is ubiquitous in contextual and reinforcement learning problems, where the size of the policy class renders enumeration and search within the policy class infeasible. Instead, we consider generalizing the methodology of online boosting. We define a weak learning algorithm as a mechanism that guarantees multiplicatively approximate regret against a base class of experts. In this access model, we give an efficient boosting algorithm that guarantees near-optimal regret against the convex hull of the base class. We consider both full and partial (a.k.a. bandit) information feedback models. We also give an analogous efficient boosting algorithm for the i.i.d. statistical setting. 
Our results simultaneously generalize online boosting and gradient boosting guarantees to contextual learning model, online convex optimization and bandit linear optimization settings.}\n}", "pdf": "http://proceedings.mlr.press/v139/hazan21a/hazan21a.pdf", "supp": "", "pdf_size": 309998, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16166517307499512921&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Google AI Princeton, Princeton, NJ, USA+Princeton University, Princeton, NJ, USA; Microsoft Research, Redmond, WA, USA", "aff_domain": "cs.princeton.edu;microsoft.com", "email": "cs.princeton.edu;microsoft.com", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/hazan21a.html", "aff_unique_index": "0+1;2", "aff_unique_norm": "Google;Princeton University;Microsoft", "aff_unique_dep": "Google AI;;Microsoft Research", "aff_unique_url": "https://ai.google;https://www.princeton.edu;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "Google AI;Princeton;MSR", "aff_campus_unique_index": "0+0;1", "aff_campus_unique": "Princeton;Redmond", "aff_country_unique_index": "0+0;0", "aff_country_unique": "United States" }, { "title": "Boosting the Throughput and Accelerator Utilization of Specialized CNN Inference Beyond Increasing Batch Size", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8415", "id": "8415", "proceeding": "http://proceedings.mlr.press/v139/kosaian21a.html", "slides": "", "author_site": "Jack Kosaian, Amar Phanishayee, Matthai Philipose, Debadeepta Dey, Rashmi Vinayak", "author": "Jack Kosaian; Amar Phanishayee; Matthai Philipose; Debadeepta Dey; Rashmi Vinayak", "abstract": "Datacenter vision systems widely use small, specialized convolutional neural networks (CNNs) trained on specific tasks for high-throughput inference. These settings employ accelerators with massive computational capacity, but which specialized CNNs underutilize due to having low arithmetic intensity. This results in suboptimal application-level throughput and poor returns on accelerator investment. Increasing batch size is the only known way to increase both application-level throughput and accelerator utilization for inference, but yields diminishing returns; specialized CNNs poorly utilize accelerators even with large batch size. We propose FoldedCNNs, a new approach to CNN design that increases inference throughput and utilization beyond large batch size. FoldedCNNs rethink the structure of inputs and layers of specialized CNNs to boost arithmetic intensity: in FoldedCNNs, f images with C channels each are concatenated into a single input with fC channels and jointly classified by a wider CNN. 
Increased arithmetic intensity in FoldedCNNs increases the throughput and GPU utilization of specialized CNN inference by up to 2.5x and 2.8x, with accuracy close to the original CNN in most cases.", "bibtex": "@InProceedings{pmlr-v139-kosaian21a,\n title = \t {Boosting the Throughput and Accelerator Utilization of Specialized CNN Inference Beyond Increasing Batch Size},\n author = {Kosaian, Jack and Phanishayee, Amar and Philipose, Matthai and Dey, Debadeepta and Vinayak, Rashmi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5731--5741},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kosaian21a/kosaian21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kosaian21a.html},\n abstract = \t {Datacenter vision systems widely use small, specialized convolutional neural networks (CNNs) trained on specific tasks for high-throughput inference. These settings employ accelerators with massive computational capacity, but which specialized CNNs underutilize due to having low arithmetic intensity. This results in suboptimal application-level throughput and poor returns on accelerator investment. Increasing batch size is the only known way to increase both application-level throughput and accelerator utilization for inference, but yields diminishing returns; specialized CNNs poorly utilize accelerators even with large batch size. We propose FoldedCNNs, a new approach to CNN design that increases inference throughput and utilization beyond large batch size. FoldedCNNs rethink the structure of inputs and layers of specialized CNNs to boost arithmetic intensity: in FoldedCNNs, f images with C channels each are concatenated into a single input with fC channels and jointly classified by a wider CNN. 
Increased arithmetic intensity in FoldedCNNs increases the throughput and GPU utilization of specialized CNN inference by up to 2.5x and 2.8x, with accuracy close to the original CNN in most cases.}\n}", "pdf": "http://proceedings.mlr.press/v139/kosaian21a/kosaian21a.pdf", "supp": "", "pdf_size": 954373, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2832705445304188041&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Carnegie Mellon University; Microsoft Research; Microsoft Research; Microsoft Research; Carnegie Mellon University", "aff_domain": "cs.cmu.edu; ; ; ; ", "email": "cs.cmu.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/kosaian21a.html", "aff_unique_index": "0;1;1;1;0", "aff_unique_norm": "Carnegie Mellon University;Microsoft", "aff_unique_dep": ";Microsoft Research", "aff_unique_url": "https://www.cmu.edu;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "CMU;MSR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Bootstrapping Fitted Q-Evaluation for Off-Policy Inference", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8507", "id": "8507", "proceeding": "http://proceedings.mlr.press/v139/hao21b.html", "slides": "", "author_site": "Botao Hao, Xiang Ji, Yaqi Duan, Hao Lu, Csaba Szepesvari, Mengdi Wang", "author": "Botao Hao; Xiang Ji; Yaqi Duan; Hao Lu; Csaba Szepesvari; Mengdi Wang", "abstract": "Bootstrapping provides a flexible and effective approach for assessing the quality of batch reinforcement learning, yet its theoretical properties are poorly understood. In this paper, we study the use of bootstrapping in off-policy evaluation (OPE), and in particular, we focus on the fitted Q-evaluation (FQE) that is known to be minimax-optimal in the tabular and linear-model cases. We propose a bootstrapping FQE method for inferring the distribution of the policy evaluation error and show that this method is asymptotically efficient and distributionally consistent for off-policy statistical inference. To overcome the computation limit of bootstrapping, we further adapt a subsampling procedure that improves the runtime by an order of magnitude. We numerically evaluate the bootstrapping method in classical RL environments for confidence interval estimation, estimating the variance of off-policy evaluator, and estimating the correlation between multiple off-policy evaluators.", "bibtex": "@InProceedings{pmlr-v139-hao21b,\n title = \t {Bootstrapping Fitted Q-Evaluation for Off-Policy Inference},\n author = {Hao, Botao and Ji, Xiang and Duan, Yaqi and Lu, Hao and Szepesvari, Csaba and Wang, Mengdi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4074--4084},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hao21b/hao21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/hao21b.html},\n abstract = \t {Bootstrapping provides a flexible and effective approach for assessing the quality of batch reinforcement learning, yet its theoretical properties are poorly understood. 
In this paper, we study the use of bootstrapping in off-policy evaluation (OPE), and in particular, we focus on the fitted Q-evaluation (FQE) that is known to be minimax-optimal in the tabular and linear-model cases. We propose a bootstrapping FQE method for inferring the distribution of the policy evaluation error and show that this method is asymptotically efficient and distributionally consistent for off-policy statistical inference. To overcome the computation limit of bootstrapping, we further adapt a subsampling procedure that improves the runtime by an order of magnitude. We numerically evaluate the bootstrapping method in classical RL environments for confidence interval estimation, estimating the variance of off-policy evaluator, and estimating the correlation between multiple off-policy evaluators.}\n}", "pdf": "http://proceedings.mlr.press/v139/hao21b/hao21b.pdf", "supp": "", "pdf_size": 1251062, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12563793868739113646&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Deepmind+University of Alberta; Princeton University; Princeton University; Princeton University; Deepmind+University of Alberta+Princeton University; Princeton University", "aff_domain": "gmail.com; ; ; ; ;princeton.edu", "email": "gmail.com; ; ; ; ;princeton.edu", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/hao21b.html", "aff_unique_index": "0+1;2;2;2;0+1+2;2", "aff_unique_norm": "DeepMind;University of Alberta;Princeton University", "aff_unique_dep": ";;", "aff_unique_url": "https://deepmind.com;https://www.ualberta.ca;https://www.princeton.edu", "aff_unique_abbr": "DeepMind;UAlberta;Princeton", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+1;2;2;2;0+1+2;2", "aff_country_unique": "United Kingdom;Canada;United States" }, { "title": "Break-It-Fix-It: Unsupervised Learning for Program Repair", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10473", "id": "10473", "proceeding": "http://proceedings.mlr.press/v139/yasunaga21a.html", "slides": "", "author_site": "Michihiro Yasunaga, Percy Liang", "author": "Michihiro Yasunaga; Percy Liang", "abstract": "We consider repair tasks: given a critic (e.g., compiler) that assesses the quality of an input, the goal is to train a fixer that converts a bad example (e.g., code with syntax errors) into a good one (e.g., code with no errors). Existing works create training data consisting of (bad, good) pairs by corrupting good examples using heuristics (e.g., dropping tokens). However, fixers trained on this synthetically-generated data do not extrapolate well to the real distribution of bad inputs. To bridge this gap, we propose a new training approach, Break-It-Fix-It (BIFI), which has two key ideas: (i) we use the critic to check a fixer\u2019s output on real bad inputs and add good (fixed) outputs to the training data, and (ii) we train a breaker to generate realistic bad code from good code. Based on these ideas, we iteratively update the breaker and the fixer while using them in conjunction to generate more paired data. We evaluate BIFI on two code repair datasets: GitHub-Python, a new dataset we introduce where the goal is to repair Python code with AST parse errors; and DeepFix, where the goal is to repair C code with compiler errors. BIFI outperforms existing methods, obtaining 90.5% repair accuracy on GitHub-Python (+28.5%) and 71.7% on DeepFix (+5.6%). 
Notably, BIFI does not require any labeled data; we hope it will be a strong starting point for unsupervised learning of various repair tasks.", "bibtex": "@InProceedings{pmlr-v139-yasunaga21a,\n title = \t {Break-It-Fix-It: Unsupervised Learning for Program Repair},\n author = {Yasunaga, Michihiro and Liang, Percy},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11941--11952},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yasunaga21a/yasunaga21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yasunaga21a.html},\n abstract = \t {We consider repair tasks: given a critic (e.g., compiler) that assesses the quality of an input, the goal is to train a fixer that converts a bad example (e.g., code with syntax errors) into a good one (e.g., code with no errors). Existing works create training data consisting of (bad, good) pairs by corrupting good examples using heuristics (e.g., dropping tokens). However, fixers trained on this synthetically-generated data do not extrapolate well to the real distribution of bad inputs. To bridge this gap, we propose a new training approach, Break-It-Fix-It (BIFI), which has two key ideas: (i) we use the critic to check a fixer\u2019s output on real bad inputs and add good (fixed) outputs to the training data, and (ii) we train a breaker to generate realistic bad code from good code. Based on these ideas, we iteratively update the breaker and the fixer while using them in conjunction to generate more paired data. We evaluate BIFI on two code repair datasets: GitHub-Python, a new dataset we introduce where the goal is to repair Python code with AST parse errors; and DeepFix, where the goal is to repair C code with compiler errors. BIFI outperforms existing methods, obtaining 90.5% repair accuracy on GitHub-Python (+28.5%) and 71.7% on DeepFix (+5.6%). 
Notably, BIFI does not require any labeled data; we hope it will be a strong starting point for unsupervised learning of various repair tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/yasunaga21a/yasunaga21a.pdf", "supp": "", "pdf_size": 989877, "gs_citation": 145, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4368697690139646578&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Stanford University; Stanford University", "aff_domain": "cs.stanford.edu; ", "email": "cs.stanford.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/yasunaga21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Breaking the Deadly Triad with a Target Network", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8651", "id": "8651", "proceeding": "http://proceedings.mlr.press/v139/zhang21y.html", "slides": "/media/icml-2021/Slides/8651.pdf", "author_site": "Shangtong Zhang, Hengshuai Yao, Shimon Whiteson", "author": "Shangtong Zhang; Hengshuai Yao; Shimon Whiteson", "abstract": "The deadly triad refers to the instability of a reinforcement learning algorithm when it employs off-policy learning, function approximation, and bootstrapping simultaneously. In this paper, we investigate the target network as a tool for breaking the deadly triad, providing theoretical support for the conventional wisdom that a target network stabilizes training. We first propose and analyze a novel target network update rule which augments the commonly used Polyak-averaging style update with two projections. We then apply the target network and ridge regularization in several divergent algorithms and show their convergence to regularized TD fixed points. Those algorithms are off-policy with linear function approximation and bootstrapping, spanning both policy evaluation and control, as well as both discounted and average-reward settings. In particular, we provide the first convergent linear $Q$-learning algorithms under nonrestrictive and changing behavior policies without bi-level optimization.", "bibtex": "@InProceedings{pmlr-v139-zhang21y,\n title = \t {Breaking the Deadly Triad with a Target Network},\n author = {Zhang, Shangtong and Yao, Hengshuai and Whiteson, Shimon},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12621--12631},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21y/zhang21y.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21y.html},\n abstract = \t {The deadly triad refers to the instability of a reinforcement learning algorithm when it employs off-policy learning, function approximation, and bootstrapping simultaneously. In this paper, we investigate the target network as a tool for breaking the deadly triad, providing theoretical support for the conventional wisdom that a target network stabilizes training. We first propose and analyze a novel target network update rule which augments the commonly used Polyak-averaging style update with two projections. 
We then apply the target network and ridge regularization in several divergent algorithms and show their convergence to regularized TD fixed points. Those algorithms are off-policy with linear function approximation and bootstrapping, spanning both policy evaluation and control, as well as both discounted and average-reward settings. In particular, we provide the first convergent linear $Q$-learning algorithms under nonrestrictive and changing behavior policies without bi-level optimization.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21y/zhang21y.pdf", "supp": "", "pdf_size": 616554, "gs_citation": 62, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3294420755935359524&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "University of Oxford; Huawei Technologies + University of Alberta; University of Oxford", "aff_domain": "cs.ox.ac.uk; ;cs.ox.ac.uk", "email": "cs.ox.ac.uk; ;cs.ox.ac.uk", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zhang21y.html", "aff_unique_index": "0;1+2;0", "aff_unique_norm": "University of Oxford;Huawei;University of Alberta", "aff_unique_dep": ";Huawei Technologies;", "aff_unique_url": "https://www.ox.ac.uk;https://www.huawei.com;https://www.ualberta.ca", "aff_unique_abbr": "Oxford;Huawei;UAlberta", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1+2;0", "aff_country_unique": "United Kingdom;China;Canada" }, { "title": "Breaking the Limits of Message Passing Graph Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8577", "id": "8577", "proceeding": "http://proceedings.mlr.press/v139/balcilar21a.html", "slides": "/media/icml-2021/Slides/8577.pdf", "author_site": "Muhammet Balcilar, Pierre Heroux, Benoit Gauzere, Pascal Vasseur, Sebastien Adam, Paul Honeine", "author": "Muhammet Balcilar; Pierre Heroux; Benoit Gauzere; Pascal Vasseur; Sebastien Adam; Paul Honeine", "abstract": "Since the Message Passing (Graph) Neural Networks (MPNNs) have a linear complexity with respect to the number of nodes when applied to sparse graphs, they have been widely implemented and still raise a lot of interest even though their theoretical expressive power is limited to the first order Weisfeiler-Lehman test (1-WL). In this paper, we show that if the graph convolution supports are designed in spectral-domain by a non-linear custom function of eigenvalues and masked with an arbitrary large receptive field, the MPNN is theoretically more powerful than the 1-WL test and experimentally as powerful as a 3-WL existing models, while remaining spatially localized. Moreover, by designing custom filter functions, outputs can have various frequency components that allow the convolution process to learn different relationships between a given input graph signal and its associated properties. So far, the best 3-WL equivalent graph neural networks have a computational complexity in $\\mathcal{O}(n^3)$ with memory usage in $\\mathcal{O}(n^2)$, consider non-local update mechanism and do not provide the spectral richness of output profile. 
The proposed method overcomes all these aforementioned problems and reaches state-of-the-art results in many downstream tasks.", "bibtex": "@InProceedings{pmlr-v139-balcilar21a,\n title = \t {Breaking the Limits of Message Passing Graph Neural Networks},\n author = {Balcilar, Muhammet and Heroux, Pierre and Gauzere, Benoit and Vasseur, Pascal and Adam, Sebastien and Honeine, Paul},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {599--608},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/balcilar21a/balcilar21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/balcilar21a.html},\n abstract = \t {Since the Message Passing (Graph) Neural Networks (MPNNs) have a linear complexity with respect to the number of nodes when applied to sparse graphs, they have been widely implemented and still raise a lot of interest even though their theoretical expressive power is limited to the first order Weisfeiler-Lehman test (1-WL). In this paper, we show that if the graph convolution supports are designed in spectral-domain by a non-linear custom function of eigenvalues and masked with an arbitrary large receptive field, the MPNN is theoretically more powerful than the 1-WL test and experimentally as powerful as a 3-WL existing models, while remaining spatially localized. Moreover, by designing custom filter functions, outputs can have various frequency components that allow the convolution process to learn different relationships between a given input graph signal and its associated properties. So far, the best 3-WL equivalent graph neural networks have a computational complexity in $\\mathcal{O}(n^3)$ with memory usage in $\\mathcal{O}(n^2)$, consider non-local update mechanism and do not provide the spectral richness of output profile. The proposed method overcomes all these aforementioned problems and reaches state-of-the-art results in many downstream tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/balcilar21a/balcilar21a.pdf", "supp": "", "pdf_size": 475584, "gs_citation": 168, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7981688691402609281&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": ";;;;;", "aff_domain": ";;;;;", "email": ";;;;;", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/balcilar21a.html" }, { "title": "Bridging Multi-Task Learning and Meta-Learning: Towards Efficient Training and Effective Adaptation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10407", "id": "10407", "proceeding": "http://proceedings.mlr.press/v139/wang21ad.html", "slides": "/media/icml-2021/Slides/10407.pdf", "author_site": "Haoxiang Wang, Han Zhao, Bo Li", "author": "Haoxiang Wang; Han Zhao; Bo Li", "abstract": "Multi-task learning (MTL) aims to improve the generalization of several related tasks by learning them jointly. As a comparison, in addition to the joint training scheme, modern meta-learning allows unseen tasks with limited labels during the test phase, in the hope of fast adaptation over them. Despite the subtle difference between MTL and meta-learning in the problem formulation, both learning paradigms share the same insight that the shared structure between existing training tasks could lead to better generalization and adaptation. 
In this paper, we take one important step further to understand the close connection between these two learning paradigms, through both theoretical analysis and empirical investigation. Theoretically, we first demonstrate that MTL shares the same optimization formulation with a class of gradient-based meta-learning (GBML) algorithms. We then prove that for over-parameterized neural networks with sufficient depth, the learned predictive functions of MTL and GBML are close. In particular, this result implies that the predictions given by these two models are similar over the same unseen task. Empirically, we corroborate our theoretical findings by showing that, with proper implementation, MTL is competitive against state-of-the-art GBML algorithms on a set of few-shot image classification benchmarks. Since existing GBML algorithms often involve costly second-order bi-level optimization, our first-order MTL method is an order of magnitude faster on large-scale datasets such as mini-ImageNet. We believe this work could help bridge the gap between these two learning paradigms, and provide a computationally efficient alternative to GBML that also supports fast task adaptation.", "bibtex": "@InProceedings{pmlr-v139-wang21ad,\n title = \t {Bridging Multi-Task Learning and Meta-Learning: Towards Efficient Training and Effective Adaptation},\n author = {Wang, Haoxiang and Zhao, Han and Li, Bo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10991--11002},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21ad/wang21ad.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21ad.html},\n abstract = \t {Multi-task learning (MTL) aims to improve the generalization of several related tasks by learning them jointly. As a comparison, in addition to the joint training scheme, modern meta-learning allows unseen tasks with limited labels during the test phase, in the hope of fast adaptation over them. Despite the subtle difference between MTL and meta-learning in the problem formulation, both learning paradigms share the same insight that the shared structure between existing training tasks could lead to better generalization and adaptation. In this paper, we take one important step further to understand the close connection between these two learning paradigms, through both theoretical analysis and empirical investigation. Theoretically, we first demonstrate that MTL shares the same optimization formulation with a class of gradient-based meta-learning (GBML) algorithms. We then prove that for over-parameterized neural networks with sufficient depth, the learned predictive functions of MTL and GBML are close. In particular, this result implies that the predictions given by these two models are similar over the same unseen task. Empirically, we corroborate our theoretical findings by showing that, with proper implementation, MTL is competitive against state-of-the-art GBML algorithms on a set of few-shot image classification benchmarks. Since existing GBML algorithms often involve costly second-order bi-level optimization, our first-order MTL method is an order of magnitude faster on large-scale datasets such as mini-ImageNet. 
We believe this work could help bridge the gap between these two learning paradigms, and provide a computationally efficient alternative to GBML that also supports fast task adaptation.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21ad/wang21ad.pdf", "supp": "", "pdf_size": 637219, "gs_citation": 116, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5814522177483838670&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign", "aff_domain": "illinois.edu;illinois.edu;illinois.edu", "email": "illinois.edu;illinois.edu;illinois.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/wang21ad.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Illinois Urbana-Champaign", "aff_unique_dep": "", "aff_unique_url": "https://illinois.edu", "aff_unique_abbr": "UIUC", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Urbana-Champaign", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Budgeted Heterogeneous Treatment Effect Estimation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10267", "id": "10267", "proceeding": "http://proceedings.mlr.press/v139/qin21b.html", "slides": "", "author_site": "Tian Qin, Tian-Zuo Wang, Zhi-Hua Zhou", "author": "Tian Qin; Tian-Zuo Wang; Zhi-Hua Zhou", "abstract": "Heterogeneous treatment effect (HTE) estimation is receiving increasing interest due to its important applications in fields such as healthcare, economics, and education. Current HTE estimation methods generally assume the existence of abundant observational data, though the acquisition of such data can be costly. In some real scenarios, it is easy to access the pre-treatment covariates and treatment assignments, but expensive to obtain the factual outcomes. To make HTE estimation more practical, in this paper, we examine the problem of estimating HTEs with a budget constraint on observational data, aiming to obtain accurate HTE estimates with limited costs. By deriving an informative generalization bound and connecting to active learning, we propose an effective and efficient method which is validated both theoretically and empirically.", "bibtex": "@InProceedings{pmlr-v139-qin21b,\n title = \t {Budgeted Heterogeneous Treatment Effect Estimation},\n author = {Qin, Tian and Wang, Tian-Zuo and Zhou, Zhi-Hua},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8693--8702},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/qin21b/qin21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/qin21b.html},\n abstract = \t {Heterogeneous treatment effect (HTE) estimation is receiving increasing interest due to its important applications in fields such as healthcare, economics, and education. Current HTE estimation methods generally assume the existence of abundant observational data, though the acquisition of such data can be costly. In some real scenarios, it is easy to access the pre-treatment covariates and treatment assignments, but expensive to obtain the factual outcomes. 
To make HTE estimation more practical, in this paper, we examine the problem of estimating HTEs with a budget constraint on observational data, aiming to obtain accurate HTE estimates with limited costs. By deriving an informative generalization bound and connecting to active learning, we propose an effective and efficient method which is validated both theoretically and empirically.}\n}", "pdf": "http://proceedings.mlr.press/v139/qin21b/qin21b.pdf", "supp": "", "pdf_size": 1459055, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8956578090492746667&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China", "aff_domain": "lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn", "email": "lamda.nju.edu.cn;lamda.nju.edu.cn;lamda.nju.edu.cn", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/qin21b.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Nanjing University", "aff_unique_dep": "National Key Laboratory for Novel Software Technology", "aff_unique_url": "http://www.nju.edu.cn", "aff_unique_abbr": "Nanjing U", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Nanjing", "aff_country_unique_index": "0;0;0", "aff_country_unique": "China" }, { "title": "Byzantine-Resilient High-Dimensional SGD with Local Iterations on Heterogeneous Data", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8727", "id": "8727", "proceeding": "http://proceedings.mlr.press/v139/data21a.html", "slides": "/media/icml-2021/Slides/8727.pdf", "author_site": "Deepesh Data, Suhas Diggavi", "author": "Deepesh Data; Suhas Diggavi", "abstract": "We study stochastic gradient descent (SGD) with local iterations in the presence of Byzantine clients, motivated by the federated learning. The clients, instead of communicating with the server in every iteration, maintain their local models, which they update by taking several SGD iterations based on their own datasets and then communicate the net update with the server, thereby achieving communication-efficiency. Furthermore, only a subset of clients communicates with the server at synchronization times. The Byzantine clients may collude and send arbitrary vectors to the server to disrupt the learning process. To combat the adversary, we employ an efficient high-dimensional robust mean estimation algorithm at the server to filter-out corrupt vectors; and to analyze the outlier-filtering procedure, we develop a novel matrix concentration result that may be of independent interest. We provide convergence analyses for both strongly-convex and non-convex smooth objectives in the heterogeneous data setting. We believe that ours is the first Byzantine-resilient local SGD algorithm and analysis with non-trivial guarantees. 
We corroborate our theoretical results with preliminary experiments for neural network training.", "bibtex": "@InProceedings{pmlr-v139-data21a,\n title = \t {Byzantine-Resilient High-Dimensional SGD with Local Iterations on Heterogeneous Data},\n author = {Data, Deepesh and Diggavi, Suhas},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2478--2488},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/data21a/data21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/data21a.html},\n abstract = \t {We study stochastic gradient descent (SGD) with local iterations in the presence of Byzantine clients, motivated by the federated learning. The clients, instead of communicating with the server in every iteration, maintain their local models, which they update by taking several SGD iterations based on their own datasets and then communicate the net update with the server, thereby achieving communication-efficiency. Furthermore, only a subset of clients communicates with the server at synchronization times. The Byzantine clients may collude and send arbitrary vectors to the server to disrupt the learning process. To combat the adversary, we employ an efficient high-dimensional robust mean estimation algorithm at the server to filter-out corrupt vectors; and to analyze the outlier-filtering procedure, we develop a novel matrix concentration result that may be of independent interest. We provide convergence analyses for both strongly-convex and non-convex smooth objectives in the heterogeneous data setting. We believe that ours is the first Byzantine-resilient local SGD algorithm and analysis with non-trivial guarantees. We corroborate our theoretical results with preliminary experiments for neural network training.}\n}", "pdf": "http://proceedings.mlr.press/v139/data21a/data21a.pdf", "supp": "", "pdf_size": 1268539, "gs_citation": 58, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6366885263673843924&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "University of California, Los Angeles, USA; University of California, Los Angeles, USA", "aff_domain": "gmail.com; ", "email": "gmail.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/data21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Los Angeles", "aff_unique_dep": "", "aff_unique_url": "https://www.ucla.edu", "aff_unique_abbr": "UCLA", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Los Angeles", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "CARTL: Cooperative Adversarially-Robust Transfer Learning", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9669", "id": "9669", "proceeding": "http://proceedings.mlr.press/v139/chen21k.html", "slides": "/media/icml-2021/Slides/9669.pdf", "author_site": "Dian Chen, Hongxin Hu, Qian Wang, Li Yinli, Cong Wang, Chao Shen, Qi Li", "author": "Dian Chen; Hongxin Hu; Qian Wang; Li Yinli; Cong Wang; Chao Shen; Qi Li", "abstract": "Transfer learning eases the burden of training a well-performed model from scratch, especially when training data is scarce and computation power is limited. 
In deep learning, a typical strategy for transfer learning is to freeze the early layers of a pre-trained model and fine-tune the rest of its layers on the target domain. Previous work focuses on the accuracy of the transferred model but neglects the transfer of adversarial robustness. In this work, we first show that transfer learning improves the accuracy on the target domain but degrades the inherited robustness of the target model. To address such a problem, we propose a novel cooperative adversarially-robust transfer learning (CARTL) by pre-training the model via feature distance minimization and fine-tuning the pre-trained model with non-expansive fine-tuning for target domain tasks. Empirical results show that CARTL improves the inherited robustness by about 28% at most compared with the baseline with the same degree of accuracy. Furthermore, we study the relationship between the batch normalization (BN) layers and the robustness in the context of transfer learning, and we reveal that freezing BN layers can further boost the robustness transfer.", "bibtex": "@InProceedings{pmlr-v139-chen21k,\n title = \t {CARTL: Cooperative Adversarially-Robust Transfer Learning},\n author = {Chen, Dian and Hu, Hongxin and Wang, Qian and Yinli, Li and Wang, Cong and Shen, Chao and Li, Qi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1640--1650},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21k/chen21k.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21k.html},\n abstract = \t {Transfer learning eases the burden of training a well-performed model from scratch, especially when training data is scarce and computation power is limited. In deep learning, a typical strategy for transfer learning is to freeze the early layers of a pre-trained model and fine-tune the rest of its layers on the target domain. Previous work focuses on the accuracy of the transferred model but neglects the transfer of adversarial robustness. In this work, we first show that transfer learning improves the accuracy on the target domain but degrades the inherited robustness of the target model. To address such a problem, we propose a novel cooperative adversarially-robust transfer learning (CARTL) by pre-training the model via feature distance minimization and fine-tuning the pre-trained model with non-expansive fine-tuning for target domain tasks. Empirical results show that CARTL improves the inherited robustness by about 28% at most compared with the baseline with the same degree of accuracy. 
Furthermore, we study the relationship between the batch normalization (BN) layers and the robustness in the context of transfer learning, and we reveal that freezing BN layers can further boost the robustness transfer.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21k/chen21k.pdf", "supp": "", "pdf_size": 594481, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16986605262499697725&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": ";;;;;;", "aff_domain": ";;;;;;", "email": ";;;;;;", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/chen21k.html" }, { "title": "CATE: Computation-aware Neural Architecture Encoding with Transformers", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9051", "id": "9051", "proceeding": "http://proceedings.mlr.press/v139/yan21c.html", "slides": "/media/icml-2021/Slides/9051.pdf", "author_site": "Shen Yan, Kaiqiang Song, Fei Liu, Mi Zhang", "author": "Shen Yan; Kaiqiang Song; Fei Liu; Mi Zhang", "abstract": "Recent works (White et al., 2020a; Yan et al., 2020) demonstrate the importance of architecture encodings in Neural Architecture Search (NAS). These encodings encode either structure or computation information of the neural architectures. Compared to structure-aware encodings, computation-aware encodings map architectures with similar accuracies to the same region, which improves the downstream architecture search performance (Zhang et al., 2019; White et al., 2020a). In this work, we introduce a Computation-Aware Transformer-based Encoding method called CATE. Different from existing computation-aware encodings based on fixed transformation (e.g. path encoding), CATE employs a pairwise pre-training scheme to learn computation-aware encodings using Transformers with cross-attention. Such learned encodings contain dense and contextualized computation information of neural architectures. We compare CATE with eleven encodings under three major encoding-dependent NAS subroutines in both small and large search spaces. Our experiments show that CATE is beneficial to the downstream search, especially in the large search space. Moreover, the outside search space experiment demonstrates its superior generalization ability beyond the search space on which it was trained. Our code is available at: https://github.com/MSU-MLSys-Lab/CATE.", "bibtex": "@InProceedings{pmlr-v139-yan21c,\n title = \t {CATE: Computation-aware Neural Architecture Encoding with Transformers},\n author = {Yan, Shen and Song, Kaiqiang and Liu, Fei and Zhang, Mi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11670--11681},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yan21c/yan21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/yan21c.html},\n abstract = \t {Recent works (White et al., 2020a; Yan et al., 2020) demonstrate the importance of architecture encodings in Neural Architecture Search (NAS). These encodings encode either structure or computation information of the neural architectures. Compared to structure-aware encodings, computation-aware encodings map architectures with similar accuracies to the same region, which improves the downstream architecture search performance (Zhang et al., 2019; White et al., 2020a). 
In this work, we introduce a Computation-Aware Transformer-based Encoding method called CATE. Different from existing computation-aware encodings based on fixed transformation (e.g. path encoding), CATE employs a pairwise pre-training scheme to learn computation-aware encodings using Transformers with cross-attention. Such learned encodings contain dense and contextualized computation information of neural architectures. We compare CATE with eleven encodings under three major encoding-dependent NAS subroutines in both small and large search spaces. Our experiments show that CATE is beneficial to the downstream search, especially in the large search space. Moreover, the outside search space experiment demonstrates its superior generalization ability beyond the search space on which it was trained. Our code is available at: https://github.com/MSU-MLSys-Lab/CATE.}\n}", "pdf": "http://proceedings.mlr.press/v139/yan21c/yan21c.pdf", "supp": "", "pdf_size": 862827, "gs_citation": 35, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8641165479167437291&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Michigan State University; University of Central Florida + Tencent AI Lab; University of Central Florida; Michigan State University", "aff_domain": "msu.edu; ; ; ", "email": "msu.edu; ; ; ", "github": "https://github.com/MSU-MLSys-Lab/CATE", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/yan21c.html", "aff_unique_index": "0;1+2;1;0", "aff_unique_norm": "Michigan State University;University of Central Florida;Tencent", "aff_unique_dep": ";;Tencent AI Lab", "aff_unique_url": "https://www.msu.edu;https://www.ucf.edu;https://ai.tencent.com", "aff_unique_abbr": "MSU;UCF;Tencent AI Lab", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0+1;0;0", "aff_country_unique": "United States;China" }, { "title": "CIFS: Improving Adversarial Robustness of CNNs via Channel-wise Importance-based Feature Selection", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10647", "id": "10647", "proceeding": "http://proceedings.mlr.press/v139/yan21e.html", "slides": "/media/icml-2021/Slides/10647.pdf", "author_site": "Hanshu YAN, Jingfeng Zhang, Gang Niu, Jiashi Feng, Vincent Tan, Masashi Sugiyama", "author": "Hanshu Yan; Jingfeng Zhang; Gang Niu; Jiashi Feng; Vincent Tan; Masashi Sugiyama", "abstract": "We investigate the adversarial robustness of CNNs from the perspective of channel-wise activations. By comparing normally trained and adversarially trained models, we observe that adversarial training (AT) robustifies CNNs by aligning the channel-wise activations of adversarial data with those of their natural counterparts. However, the channels that are \\textit{negatively-relevant} (NR) to predictions are still over-activated when processing adversarial data. Besides, we also observe that AT does not result in similar robustness for all classes. For the robust classes, channels with larger activation magnitudes are usually more \\textit{positively-relevant} (PR) to predictions, but this alignment does not hold for the non-robust classes. Given these observations, we hypothesize that suppressing NR channels and aligning PR ones with their relevances further enhances the robustness of CNNs under AT. To examine this hypothesis, we introduce a novel mechanism, \\textit{i.e.}, \\underline{C}hannel-wise \\underline{I}mportance-based \\underline{F}eature \\underline{S}election (CIFS). 
The CIFS manipulates channels\u2019 activations of certain layers by generating non-negative multipliers to these channels based on their relevances to predictions. Extensive experiments on benchmark datasets including CIFAR10 and SVHN clearly verify the hypothesis and CIFS\u2019s effectiveness of robustifying CNNs.", "bibtex": "@InProceedings{pmlr-v139-yan21e,\n title = \t {CIFS: Improving Adversarial Robustness of CNNs via Channel-wise Importance-based Feature Selection},\n author = {Yan, Hanshu and Zhang, Jingfeng and Niu, Gang and Feng, Jiashi and Tan, Vincent and Sugiyama, Masashi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11693--11703},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yan21e/yan21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/yan21e.html},\n abstract = \t {We investigate the adversarial robustness of CNNs from the perspective of channel-wise activations. By comparing normally trained and adversarially trained models, we observe that adversarial training (AT) robustifies CNNs by aligning the channel-wise activations of adversarial data with those of their natural counterparts. However, the channels that are \\textit{negatively-relevant} (NR) to predictions are still over-activated when processing adversarial data. Besides, we also observe that AT does not result in similar robustness for all classes. For the robust classes, channels with larger activation magnitudes are usually more \\textit{positively-relevant} (PR) to predictions, but this alignment does not hold for the non-robust classes. Given these observations, we hypothesize that suppressing NR channels and aligning PR ones with their relevances further enhances the robustness of CNNs under AT. To examine this hypothesis, we introduce a novel mechanism, \\textit{i.e.}, \\underline{C}hannel-wise \\underline{I}mportance-based \\underline{F}eature \\underline{S}election (CIFS). The CIFS manipulates channels\u2019 activations of certain layers by generating non-negative multipliers to these channels based on their relevances to predictions. 
Extensive experiments on benchmark datasets including CIFAR10 and SVHN clearly verify the hypothesis and CIFS\u2019s effectiveness of robustifying CNNs.}\n}", "pdf": "http://proceedings.mlr.press/v139/yan21e/yan21e.pdf", "supp": "", "pdf_size": 1823228, "gs_citation": 56, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8868056595393232823&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Electrical and Computer Engineering, National University of Singapore, Singapore; RIKEN Center for Advanced Intelligence Project (AIP), Tokyo, Japan; RIKEN Center for Advanced Intelligence Project (AIP), Tokyo, Japan; Department of Electrical and Computer Engineering, National University of Singapore, Singapore; Department of Mathematics, National University of Singapore, Singapore + Graduate School of Frontier Sciences, The University of Tokyo, Tokyo, Japan; RIKEN Center for Advanced Intelligence Project (AIP), Tokyo, Japan + Graduate School of Frontier Sciences, The University of Tokyo, Tokyo, Japan", "aff_domain": "riken.jp; ; ; ; ; ", "email": "riken.jp; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/yan21e.html", "aff_unique_index": "0;1;1;0;0+2;1+2", "aff_unique_norm": "National University of Singapore;RIKEN Center for Advanced Intelligence Project;University of Tokyo", "aff_unique_dep": "Department of Electrical and Computer Engineering;Advanced Intelligence Project;Graduate School of Frontier Sciences", "aff_unique_url": "https://www.nus.edu.sg;https://aipcenter.riken.jp/en/;https://www.u-tokyo.ac.jp", "aff_unique_abbr": "NUS;RIKEN AIP;UTokyo", "aff_campus_unique_index": "1;1;1;1+1", "aff_campus_unique": ";Tokyo", "aff_country_unique_index": "0;1;1;0;0+1;1+1", "aff_country_unique": "Singapore;Japan" }, { "title": "CLOCS: Contrastive Learning of Cardiac Signals Across Space, Time, and Patients", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8461", "id": "8461", "proceeding": "http://proceedings.mlr.press/v139/kiyasseh21a.html", "slides": "", "author_site": "Dani Kiyasseh, Tingting Zhu, David Clifton", "author": "Dani Kiyasseh; Tingting Zhu; David A Clifton", "abstract": "The healthcare industry generates troves of unlabelled physiological data. This data can be exploited via contrastive learning, a self-supervised pre-training method that encourages representations of instances to be similar to one another. We propose a family of contrastive learning methods, CLOCS, that encourages representations across space, time, \\textit{and} patients to be similar to one another. We show that CLOCS consistently outperforms the state-of-the-art methods, BYOL and SimCLR, when performing a linear evaluation of, and fine-tuning on, downstream tasks. We also show that CLOCS achieves strong generalization performance with only 25% of labelled training data. 
Furthermore, our training procedure naturally generates patient-specific representations that can be used to quantify patient-similarity.", "bibtex": "@InProceedings{pmlr-v139-kiyasseh21a,\n title = \t {CLOCS: Contrastive Learning of Cardiac Signals Across Space, Time, and Patients},\n author = {Kiyasseh, Dani and Zhu, Tingting and Clifton, David A},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5606--5615},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kiyasseh21a/kiyasseh21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kiyasseh21a.html},\n abstract = \t {The healthcare industry generates troves of unlabelled physiological data. This data can be exploited via contrastive learning, a self-supervised pre-training method that encourages representations of instances to be similar to one another. We propose a family of contrastive learning methods, CLOCS, that encourages representations across space, time, \\textit{and} patients to be similar to one another. We show that CLOCS consistently outperforms the state-of-the-art methods, BYOL and SimCLR, when performing a linear evaluation of, and fine-tuning on, downstream tasks. We also show that CLOCS achieves strong generalization performance with only 25% of labelled training data. Furthermore, our training procedure naturally generates patient-specific representations that can be used to quantify patient-similarity.}\n}", "pdf": "http://proceedings.mlr.press/v139/kiyasseh21a/kiyasseh21a.pdf", "supp": "", "pdf_size": 1650455, "gs_citation": 222, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16333919134757348473&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Department of Engineering Science, University of Oxford, Oxford, United Kingdom+Oxford-Suzhou Centre for Advanced Research, Suzhou, China; Department of Engineering Science, University of Oxford, Oxford, United Kingdom+Oxford-Suzhou Centre for Advanced Research, Suzhou, China; Department of Engineering Science, University of Oxford, Oxford, United Kingdom+Oxford-Suzhou Centre for Advanced Research, Suzhou, China", "aff_domain": "eng.ox.ac.uk; ; ", "email": "eng.ox.ac.uk; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/kiyasseh21a.html", "aff_unique_index": "0+1;0+1;0+1", "aff_unique_norm": "University of Oxford;Oxford-Suzhou Centre for Advanced Research", "aff_unique_dep": "Department of Engineering Science;", "aff_unique_url": "https://www.ox.ac.uk;", "aff_unique_abbr": "Oxford;", "aff_campus_unique_index": "0+1;0+1;0+1", "aff_campus_unique": "Oxford;Suzhou", "aff_country_unique_index": "0+1;0+1;0+1", "aff_country_unique": "United Kingdom;China" }, { "title": "CRFL: Certifiably Robust Federated Learning against Backdoor Attacks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9201", "id": "9201", "proceeding": "http://proceedings.mlr.press/v139/xie21a.html", "slides": "/media/icml-2021/Slides/9201.pdf", "author_site": "Chulin Xie, Minghao Chen, Pin-Yu Chen, Bo Li", "author": "Chulin Xie; Minghao Chen; Pin-Yu Chen; Bo Li", "abstract": "Federated Learning (FL) as a distributed learning paradigm that aggregates information from diverse clients to train a shared global model, has demonstrated great success. 
However, malicious clients can perform poisoning attacks and model replacement to introduce backdoors into the trained global model. Although there have been intensive studies designing robust aggregation methods and empirical robust federated training protocols against backdoors, existing approaches lack robustness certification. This paper provides the first general framework, Certifiably Robust Federated Learning (CRFL), to train certifiably robust FL models against backdoors. Our method exploits clipping and smoothing on model parameters to control the global model smoothness, which yields a sample-wise robustness certification on backdoors with limited magnitude. Our certification also specifies the relation to federated learning parameters, such as poisoning ratio on instance level, number of attackers, and training iterations. Practically, we conduct comprehensive experiments across a range of federated datasets, and provide the first benchmark for certified robustness against backdoor attacks in federated learning. Our code is publicly available at https://github.com/AI-secure/CRFL.", "bibtex": "@InProceedings{pmlr-v139-xie21a,\n title = \t {CRFL: Certifiably Robust Federated Learning against Backdoor Attacks},\n author = {Xie, Chulin and Chen, Minghao and Chen, Pin-Yu and Li, Bo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11372--11382},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xie21a/xie21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/xie21a.html},\n abstract = \t {Federated Learning (FL) as a distributed learning paradigm that aggregates information from diverse clients to train a shared global model, has demonstrated great success. However, malicious clients can perform poisoning attacks and model replacement to introduce backdoors into the trained global model. Although there have been intensive studies designing robust aggregation methods and empirical robust federated training protocols against backdoors, existing approaches lack robustness certification. This paper provides the first general framework, Certifiably Robust Federated Learning (CRFL), to train certifiably robust FL models against backdoors. Our method exploits clipping and smoothing on model parameters to control the global model smoothness, which yields a sample-wise robustness certification on backdoors with limited magnitude. Our certification also specifies the relation to federated learning parameters, such as poisoning ratio on instance level, number of attackers, and training iterations. Practically, we conduct comprehensive experiments across a range of federated datasets, and provide the first benchmark for certified robustness against backdoor attacks in federated learning. 
Our code is publicly available at https://github.com/AI-secure/CRFL.}\n}", "pdf": "http://proceedings.mlr.press/v139/xie21a/xie21a.pdf", "supp": "", "pdf_size": 3794252, "gs_citation": 243, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=566297691223350385&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "University of Illinois at Urbana-Champaign; Zhejiang University; IBM Research; University of Illinois at Urbana-Champaign", "aff_domain": "illinois.edu; ;ibm.com;illinois.edu", "email": "illinois.edu; ;ibm.com;illinois.edu", "github": "https://github.com/AI-secure/CRFL", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/xie21a.html", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "University of Illinois Urbana-Champaign;Zhejiang University;IBM", "aff_unique_dep": ";;IBM Research", "aff_unique_url": "https://illinois.edu;https://www.zju.edu.cn;https://www.ibm.com/research", "aff_unique_abbr": "UIUC;ZJU;IBM", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Urbana-Champaign;", "aff_country_unique_index": "0;1;0;0", "aff_country_unique": "United States;China" }, { "title": "CRPO: A New Approach for Safe Reinforcement Learning with Convergence Guarantee", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8995", "id": "8995", "proceeding": "http://proceedings.mlr.press/v139/xu21a.html", "slides": "/media/icml-2021/Slides/8995.pdf", "author_site": "Tengyu Xu, Yingbin LIANG, Guanghui Lan", "author": "Tengyu Xu; Yingbin Liang; Guanghui Lan", "abstract": "In safe reinforcement learning (SRL) problems, an agent explores the environment to maximize an expected total reward and meanwhile avoids violation of certain constraints on a number of expected total costs. In general, such SRL problems have nonconvex objective functions subject to multiple nonconvex constraints, and hence are very challenging to solve, particularly to provide a globally optimal policy. Many popular SRL algorithms adopt a primal-dual structure which utilizes the updating of dual variables for satisfying the constraints. In contrast, we propose a primal approach, called constraint-rectified policy optimization (CRPO), which updates the policy alternatingly between objective improvement and constraint satisfaction. CRPO provides a primal-type algorithmic framework to solve SRL problems, where each policy update can take any variant of policy optimization step. To demonstrate the theoretical performance of CRPO, we adopt natural policy gradient (NPG) for each policy update step and show that CRPO achieves an $\\mathcal{O}(1/\\sqrt{T})$ convergence rate to the global optimal policy in the constrained policy set and an $\\mathcal{O}(1/\\sqrt{T})$ error bound on constraint satisfaction. This is the first finite-time analysis of primal SRL algorithms with global optimality guarantee. 
Our empirical results demonstrate that CRPO can outperform the existing primal-dual baseline algorithms significantly.", "bibtex": "@InProceedings{pmlr-v139-xu21a,\n title = \t {CRPO: A New Approach for Safe Reinforcement Learning with Convergence Guarantee},\n author = {Xu, Tengyu and Liang, Yingbin and Lan, Guanghui},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11480--11491},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xu21a/xu21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/xu21a.html},\n abstract = \t {In safe reinforcement learning (SRL) problems, an agent explores the environment to maximize an expected total reward and meanwhile avoids violation of certain constraints on a number of expected total costs. In general, such SRL problems have nonconvex objective functions subject to multiple nonconvex constraints, and hence are very challenging to solve, particularly to provide a globally optimal policy. Many popular SRL algorithms adopt a primal-dual structure which utilizes the updating of dual variables for satisfying the constraints. In contrast, we propose a primal approach, called constraint-rectified policy optimization (CRPO), which updates the policy alternatingly between objective improvement and constraint satisfaction. CRPO provides a primal-type algorithmic framework to solve SRL problems, where each policy update can take any variant of policy optimization step. To demonstrate the theoretical performance of CRPO, we adopt natural policy gradient (NPG) for each policy update step and show that CRPO achieves an $\\mathcal{O}(1/\\sqrt{T})$ convergence rate to the global optimal policy in the constrained policy set and an $\\mathcal{O}(1/\\sqrt{T})$ error bound on constraint satisfaction. This is the first finite-time analysis of primal SRL algorithms with global optimality guarantee. 
Our empirical results demonstrate that CRPO can outperform the existing primal-dual baseline algorithms significantly.}\n}", "pdf": "http://proceedings.mlr.press/v139/xu21a/xu21a.pdf", "supp": "", "pdf_size": 934648, "gs_citation": 169, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9605486372134552330&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Electrical and Computer Engineering, The Ohio State University, OH, United States; Department of Electrical and Computer Engineering, The Ohio State University, OH, United States; Industrial and Systems Engineering, Georgia Institute of Technology, GA, United States", "aff_domain": "osu.edu; ; ", "email": "osu.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/xu21a.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "Ohio State University;Georgia Institute of Technology", "aff_unique_dep": "Department of Electrical and Computer Engineering;Industrial and Systems Engineering", "aff_unique_url": "https://www.osu.edu;https://www.gatech.edu", "aff_unique_abbr": "OSU;Georgia Tech", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "OH;Georgia", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "CURI: A Benchmark for Productive Concept Learning Under Uncertainty", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9597", "id": "9597", "proceeding": "http://proceedings.mlr.press/v139/vedantam21a.html", "slides": "", "author_site": "Shanmukha Ramakrishna Vedantam, Arthur Szlam, Maximilian Nickel, Ari Morcos, Brenden Lake", "author": "Ramakrishna Vedantam; Arthur Szlam; Maximillian Nickel; Ari Morcos; Brenden M Lake", "abstract": "Humans can learn and reason under substantial uncertainty in a space of infinitely many compositional, productive concepts. For example, if a scene with two blue spheres qualifies as \u201cdaxy,\u201d one can reason that the underlying concept may require scenes to have \u201conly blue spheres\u201d or \u201conly spheres\u201d or \u201conly two objects.\u201d In contrast, standard benchmarks for compositional reasoning do not explicitly capture a notion of reasoning under uncertainty or evaluate compositional concept acquisition. We introduce a new benchmark, Compositional Reasoning Under Uncertainty (CURI) that instantiates a series of few-shot, meta-learning tasks in a productive concept space to evaluate different aspects of systematic generalization under uncertainty, including splits that test abstract understandings of disentangling, productive generalization, learning boolean operations, variable binding, etc. Importantly, we also contribute a model-independent \u201ccompositionality gap\u201d to evaluate the difficulty of generalizing out-of-distribution along each of these axes, allowing objective comparison of the difficulty of each compositional split. 
Evaluations across a range of modeling choices and splits reveal substantial room for improvement on the proposed benchmark.", "bibtex": "@InProceedings{pmlr-v139-vedantam21a,\n title = \t {CURI: A Benchmark for Productive Concept Learning Under Uncertainty},\n author = {Vedantam, Ramakrishna and Szlam, Arthur and Nickel, Maximillian and Morcos, Ari and Lake, Brenden M},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10519--10529},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/vedantam21a/vedantam21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/vedantam21a.html},\n abstract = \t {Humans can learn and reason under substantial uncertainty in a space of infinitely many compositional, productive concepts. For example, if a scene with two blue spheres qualifies as \u201cdaxy,\u201d one can reason that the underlying concept may require scenes to have \u201conly blue spheres\u201d or \u201conly spheres\u201d or \u201conly two objects.\u201d In contrast, standard benchmarks for compositional reasoning do not explicitly capture a notion of reasoning under uncertainty or evaluate compositional concept acquisition. We introduce a new benchmark, Compositional Reasoning Under Uncertainty (CURI) that instantiates a series of few-shot, meta-learning tasks in a productive concept space to evaluate different aspects of systematic generalization under uncertainty, including splits that test abstract understandings of disentangling, productive generalization, learning boolean operations, variable binding, etc. Importantly, we also contribute a model-independent \u201ccompositionality gap\u201d to evaluate the difficulty of generalizing out-of-distribution along each of these axes, allowing objective comparison of the difficulty of each compositional split. Evaluations across a range of modeling choices and splits reveal substantial room for improvement on the proposed benchmark.}\n}", "pdf": "http://proceedings.mlr.press/v139/vedantam21a/vedantam21a.pdf", "supp": "", "pdf_size": 3660880, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8315298936162694296&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Facebook AI Research (FAIR), USA; Facebook AI Research (FAIR), USA; Facebook AI Research (FAIR), USA; Facebook AI Research (FAIR), USA; Facebook AI Research (FAIR), USA + New York University (NYU), USA", "aff_domain": "fb.com; ; ; ; ", "email": "fb.com; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/vedantam21a.html", "aff_unique_index": "0;0;0;0;0+1", "aff_unique_norm": "Meta;New York University", "aff_unique_dep": "AI Research;", "aff_unique_url": "https://research.facebook.com;https://www.nyu.edu", "aff_unique_abbr": "FAIR;NYU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0+0", "aff_country_unique": "United States" }, { "title": "Calibrate Before Use: Improving Few-shot Performance of Language Models", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10185", "id": "10185", "proceeding": "http://proceedings.mlr.press/v139/zhao21c.html", "slides": "", "author_site": "Tony Z. 
Zhao, Eric Wallace, Shi Feng, Dan Klein, Sameer Singh", "author": "Zihao Zhao; Eric Wallace; Shi Feng; Dan Klein; Sameer Singh", "abstract": "GPT-3 can perform numerous tasks when provided a natural language prompt that contains a few training examples. We show that this type of few-shot learning can be unstable: the choice of prompt format, training examples, and even the order of the examples can cause accuracy to vary from near chance to near state-of-the-art. We demonstrate that this instability arises from the bias of language models towards predicting certain answers, e.g., those that are placed near the end of the prompt or are common in the pre-training data. To mitigate this, we first estimate the model\u2019s bias towards each answer by asking for its prediction when given a training prompt and a content-free test input such as \"N/A\". We then fit calibration parameters that cause the prediction for this input to be uniform across answers. On a diverse set of tasks, this contextual calibration procedure substantially improves GPT-3 and GPT-2\u2019s accuracy (up to 30.0% absolute) across different choices of the prompt, while also making learning considerably more stable.", "bibtex": "@InProceedings{pmlr-v139-zhao21c,\n title = \t {Calibrate Before Use: Improving Few-shot Performance of Language Models},\n author = {Zhao, Zihao and Wallace, Eric and Feng, Shi and Klein, Dan and Singh, Sameer},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12697--12706},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhao21c/zhao21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhao21c.html},\n abstract = \t {GPT-3 can perform numerous tasks when provided a natural language prompt that contains a few training examples. We show that this type of few-shot learning can be unstable: the choice of prompt format, training examples, and even the order of the examples can cause accuracy to vary from near chance to near state-of-the-art. We demonstrate that this instability arises from the bias of language models towards predicting certain answers, e.g., those that are placed near the end of the prompt or are common in the pre-training data. To mitigate this, we first estimate the model\u2019s bias towards each answer by asking for its prediction when given a training prompt and a content-free test input such as \"N/A\". We then fit calibration parameters that cause the prediction for this input to be uniform across answers. 
On a diverse set of tasks, this contextual calibration procedure substantially improves GPT-3 and GPT-2\u2019s accuracy (up to 30.0% absolute) across different choices of the prompt, while also making learning considerably more stable.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhao21c/zhao21c.pdf", "supp": "", "pdf_size": 2606015, "gs_citation": 1480, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8877771337173887679&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "UC Berkeley; UC Berkeley; University of Maryland; UC Berkeley; UC Irvine", "aff_domain": "berkeley.edu;berkeley.edu;umd.edu;cs.berkeley.edu;uci.edu", "email": "berkeley.edu;berkeley.edu;umd.edu;cs.berkeley.edu;uci.edu", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/zhao21c.html", "aff_unique_index": "0;0;1;0;2", "aff_unique_norm": "University of California, Berkeley;University of Maryland;University of California, Irvine", "aff_unique_dep": ";;", "aff_unique_url": "https://www.berkeley.edu;https://www/umd.edu;https://www.uci.edu", "aff_unique_abbr": "UC Berkeley;UMD;UCI", "aff_campus_unique_index": "0;0;0;2", "aff_campus_unique": "Berkeley;;Irvine", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Can Subnetwork Structure Be the Key to Out-of-Distribution Generalization?", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9481", "id": "9481", "proceeding": "http://proceedings.mlr.press/v139/zhang21a.html", "slides": "", "author_site": "Dinghuai Zhang, Kartik Ahuja, Yilun Xu, Yisen Wang, Aaron Courville", "author": "Dinghuai Zhang; Kartik Ahuja; Yilun Xu; Yisen Wang; Aaron Courville", "abstract": "Can models with particular structure avoid being biased towards spurious correlation in out-of-distribution (OOD) generalization? Peters et al. (2016) provides a positive answer for linear cases. In this paper, we use a functional modular probing method to analyze deep model structures under OOD setting. We demonstrate that even in biased models (which focus on spurious correlation) there still exist unbiased functional subnetworks. Furthermore, we articulate and confirm the functional lottery ticket hypothesis: the full network contains a subnetwork with proper structure that can achieve better OOD performance. We then propose Modular Risk Minimization to solve the subnetwork selection problem. Our algorithm learns the functional structure from a given dataset, and can be combined with any other OOD regularization methods. Experiments on various OOD generalization tasks corroborate the effectiveness of our method.", "bibtex": "@InProceedings{pmlr-v139-zhang21a,\n title = \t {Can Subnetwork Structure Be the Key to Out-of-Distribution Generalization?},\n author = {Zhang, Dinghuai and Ahuja, Kartik and Xu, Yilun and Wang, Yisen and Courville, Aaron},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12356--12367},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21a/zhang21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21a.html},\n abstract = \t {Can models with particular structure avoid being biased towards spurious correlation in out-of-distribution (OOD) generalization? Peters et al. 
(2016) provides a positive answer for linear cases. In this paper, we use a functional modular probing method to analyze deep model structures under OOD setting. We demonstrate that even in biased models (which focus on spurious correlation) there still exist unbiased functional subnetworks. Furthermore, we articulate and confirm the functional lottery ticket hypothesis: the full network contains a subnetwork with proper structure that can achieve better OOD performance. We then propose Modular Risk Minimization to solve the subnetwork selection problem. Our algorithm learns the functional structure from a given dataset, and can be combined with any other OOD regularization methods. Experiments on various OOD generalization tasks corroborate the effectiveness of our method.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21a/zhang21a.pdf", "supp": "", "pdf_size": 1967262, "gs_citation": 109, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15879924505927996377&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Mila - Quebec AI Institute; Mila - Quebec AI Institute; CSAIL, Massachusetts Institute of Technology; Key Lab of Machine Perception (MoE), School of EECS, Peking University; Mila - Quebec AI Institute", "aff_domain": "mila.quebec; ; ; ; ", "email": "mila.quebec; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/zhang21a.html", "aff_unique_index": "0;0;1;2;0", "aff_unique_norm": "Quebec AI Institute;Massachusetts Institute of Technology;Peking University", "aff_unique_dep": "AI Institute;Computer Science and Artificial Intelligence Laboratory;School of EECS", "aff_unique_url": "https://mila.quebec;https://www.csail.mit.edu;http://www.pku.edu.cn", "aff_unique_abbr": "Mila;MIT;PKU", "aff_campus_unique_index": "1", "aff_campus_unique": ";Cambridge", "aff_country_unique_index": "0;0;1;2;0", "aff_country_unique": "Canada;United States;China" }, { "title": "Catastrophic Fisher Explosion: Early Phase Fisher Matrix Impacts Generalization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8485", "id": "8485", "proceeding": "http://proceedings.mlr.press/v139/jastrzebski21a.html", "slides": "", "author_site": "Stanislaw Jastrzebski, Devansh Arpit, Oliver Astrand, Giancarlo Kerg, Huan Wang, Caiming Xiong, Richard Socher, Kyunghyun Cho, Krzysztof J Geras", "author": "Stanislaw Jastrzebski; Devansh Arpit; Oliver Astrand; Giancarlo B Kerg; Huan Wang; Caiming Xiong; Richard Socher; Kyunghyun Cho; Krzysztof J Geras", "abstract": "The early phase of training a deep neural network has a dramatic effect on the local curvature of the loss function. For instance, using a small learning rate does not guarantee stable optimization because the optimization trajectory has a tendency to steer towards regions of the loss surface with increasing local curvature. We ask whether this tendency is connected to the widely observed phenomenon that the choice of the learning rate strongly influences generalization. We first show that stochastic gradient descent (SGD) implicitly penalizes the trace of the Fisher Information Matrix (FIM), a measure of the local curvature, from the start of training. We argue it is an implicit regularizer in SGD by showing that explicitly penalizing the trace of the FIM can significantly improve generalization. 
We highlight that poor final generalization coincides with the trace of the FIM attaining a large value early in training, to which we refer as catastrophic Fisher explosion. Finally, to gain insight into the regularization effect of penalizing the trace of the FIM, we show that it limits memorization by reducing the learning speed of examples with noisy labels more than that of the examples with clean labels.", "bibtex": "@InProceedings{pmlr-v139-jastrzebski21a,\n title = \t {Catastrophic Fisher Explosion: Early Phase Fisher Matrix Impacts Generalization},\n author = {Jastrzebski, Stanislaw and Arpit, Devansh and Astrand, Oliver and Kerg, Giancarlo B and Wang, Huan and Xiong, Caiming and Socher, Richard and Cho, Kyunghyun and Geras, Krzysztof J},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4772--4784},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jastrzebski21a/jastrzebski21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jastrzebski21a.html},\n abstract = \t {The early phase of training a deep neural network has a dramatic effect on the local curvature of the loss function. For instance, using a small learning rate does not guarantee stable optimization because the optimization trajectory has a tendency to steer towards regions of the loss surface with increasing local curvature. We ask whether this tendency is connected to the widely observed phenomenon that the choice of the learning rate strongly influences generalization. We first show that stochastic gradient descent (SGD) implicitly penalizes the trace of the Fisher Information Matrix (FIM), a measure of the local curvature, from the start of training. We argue it is an implicit regularizer in SGD by showing that explicitly penalizing the trace of the FIM can significantly improve generalization. We highlight that poor final generalization coincides with the trace of the FIM attaining a large value early in training, to which we refer as catastrophic Fisher explosion. 
Finally, to gain insight into the regularization effect of penalizing the trace of the FIM, we show that it limits memorization by reducing the learning speed of examples with noisy labels more than that of the examples with clean labels.}\n}", "pdf": "http://proceedings.mlr.press/v139/jastrzebski21a/jastrzebski21a.pdf", "supp": "", "pdf_size": 1344006, "gs_citation": 80, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7041812310235369320&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 12, "aff": ";;;;;;;;", "aff_domain": ";;;;;;;;", "email": ";;;;;;;;", "github": "", "project": "", "author_num": 9, "oa": "https://proceedings.mlr.press/v139/jastrzebski21a.html" }, { "title": "Catformer: Designing Stable Transformers via Sensitivity Analysis", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9035", "id": "9035", "proceeding": "http://proceedings.mlr.press/v139/davis21a.html", "slides": "", "author_site": "Jared Quincy Davis, Albert Gu, Krzysztof Choromanski, Tri Dao, Christopher Re, Chelsea Finn, Percy Liang", "author": "Jared Q Davis; Albert Gu; Krzysztof Choromanski; Tri Dao; Christopher Re; Chelsea Finn; Percy Liang", "abstract": "Transformer architectures are widely used, but training them is non-trivial, requiring custom learning rate schedules, scaling terms, residual connections, careful placement of submodules such as normalization, and so on. In this paper, we improve upon recent analysis of Transformers and formalize a notion of sensitivity to capture the difficulty of training. Sensitivity characterizes how the variance of activation and gradient norms change in expectation when parameters are randomly perturbed. We analyze the sensitivity of previous Transformer architectures and design a new architecture, the Catformer, which replaces residual connections or RNN-based gating mechanisms with concatenation. We prove that Catformers are less sensitive than other Transformer variants and demonstrate that this leads to more stable training. On DMLab30, a suite of high-dimension reinforcement tasks, Catformer outperforms other transformers, including Gated Transformer-XL\u2014the state-of-the-art architecture designed to address stability\u2014by 13%.", "bibtex": "@InProceedings{pmlr-v139-davis21a,\n title = \t {Catformer: Designing Stable Transformers via Sensitivity Analysis},\n author = {Davis, Jared Q and Gu, Albert and Choromanski, Krzysztof and Dao, Tri and Re, Christopher and Finn, Chelsea and Liang, Percy},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2489--2499},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/davis21a/davis21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/davis21a.html},\n abstract = \t {Transformer architectures are widely used, but training them is non-trivial, requiring custom learning rate schedules, scaling terms, residual connections, careful placement of submodules such as normalization, and so on. In this paper, we improve upon recent analysis of Transformers and formalize a notion of sensitivity to capture the difficulty of training. Sensitivity characterizes how the variance of activation and gradient norms change in expectation when parameters are randomly perturbed. 
We analyze the sensitivity of previous Transformer architectures and design a new architecture, the Catformer, which replaces residual connections or RNN-based gating mechanisms with concatenation. We prove that Catformers are less sensitive than other Transformer variants and demonstrate that this leads to more stable training. On DMLab30, a suite of high-dimension reinforcement tasks, Catformer outperforms other transformers, including Gated Transformer-XL\u2014the state-of-the-art architecture designed to address stability\u2014by 13%.}\n}", "pdf": "http://proceedings.mlr.press/v139/davis21a/davis21a.pdf", "supp": "", "pdf_size": 633706, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12003062278077911460&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Stanford University+DeepMind; Stanford University; Google Brain Robotics+Columbia University; Stanford University; Stanford University; Stanford University+Google Brain Robotics; Stanford University", "aff_domain": "gmail.com; ; ; ; ; ; ", "email": "gmail.com; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/davis21a.html", "aff_unique_index": "0+1;0;2+3;0;0;0+2;0", "aff_unique_norm": "Stanford University;DeepMind;Google;Columbia University", "aff_unique_dep": ";;Google Brain Robotics;", "aff_unique_url": "https://www.stanford.edu;https://deepmind.com;https://ai.google;https://www.columbia.edu", "aff_unique_abbr": "Stanford;DeepMind;Google Brain Robotics;Columbia", "aff_campus_unique_index": "0;0;2;0;0;0+2;0", "aff_campus_unique": "Stanford;;Mountain View", "aff_country_unique_index": "0+1;0;0+0;0;0;0+0;0", "aff_country_unique": "United States;United Kingdom" }, { "title": "Causal Curiosity: RL Agents Discovering Self-supervised Experiments for Causal Representation Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10025", "id": "10025", "proceeding": "http://proceedings.mlr.press/v139/sontakke21a.html", "slides": "", "author_site": "Sumedh Sontakke, Arash Mehrjou, Laurent Itti, Bernhard Sch\u00f6lkopf", "author": "Sumedh A Sontakke; Arash Mehrjou; Laurent Itti; Bernhard Sch\u00f6lkopf", "abstract": "Humans show an innate ability to learn the regularities of the world through interaction. By performing experiments in our environment, we are able to discern the causal factors of variation and infer how they affect the dynamics of our world. Analogously, here we attempt to equip reinforcement learning agents with the ability to perform experiments that facilitate a categorization of the rolled-out trajectories, and to subsequently infer the causal factors of the environment in a hierarchical manner. We introduce a novel intrinsic reward, called causal curiosity, and show that it allows our agents to learn optimal sequences of actions, and to discover causal factors in the dynamics. The learned behavior allows the agent to infer a binary quantized representation for the ground-truth causal factors in every environment. Additionally, we find that these experimental behaviors are semantically meaningful (e.g., to differentiate between heavy and light blocks, our agents learn to lift them), and are learnt in a self-supervised manner with approximately 2.5 times less data than conventional supervised planners. We show that these behaviors can be re-purposed and fine-tuned (e.g., from lifting to pushing or other downstream tasks). 
Finally, we show that the knowledge of causal factor representations aids zero-shot learning for more complex tasks.", "bibtex": "@InProceedings{pmlr-v139-sontakke21a,\n title = \t {Causal Curiosity: RL Agents Discovering Self-supervised Experiments for Causal Representation Learning},\n author = {Sontakke, Sumedh A and Mehrjou, Arash and Itti, Laurent and Sch{\\\"o}lkopf, Bernhard},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9848--9858},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sontakke21a/sontakke21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/sontakke21a.html},\n abstract = \t {Humans show an innate ability to learn the regularities of the world through interaction. By performing experiments in our environment, we are able to discern the causal factors of variation and infer how they affect the dynamics of our world. Analogously, here we attempt to equip reinforcement learning agents with the ability to perform experiments that facilitate a categorization of the rolled-out trajectories, and to subsequently infer the causal factors of the environment in a hierarchical manner. We introduce a novel intrinsic reward, called causal curiosity, and show that it allows our agents to learn optimal sequences of actions, and to discover causal factors in the dynamics. The learned behavior allows the agent to infer a binary quantized representation for the ground-truth causal factors in every environment. Additionally, we find that these experimental behaviors are semantically meaningful (e.g., to differentiate between heavy and light blocks, our agents learn to lift them), and are learnt in a self-supervised manner with approximately 2.5 times less data than conventional supervised planners. We show that these behaviors can be re-purposed and fine-tuned (e.g., from lifting to pushing or other downstream tasks). 
Finally, we show that the knowledge of causal factor representations aids zero-shot learning for more complex tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/sontakke21a/sontakke21a.pdf", "supp": "", "pdf_size": 2642909, "gs_citation": 81, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10455299841806261684&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "University of Southern California; Max Planck Institute for Intelligent Systems; University of Southern California; Max Planck Institute for Intelligent Systems", "aff_domain": "usc.edu; ; ; ", "email": "usc.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/sontakke21a.html", "aff_unique_index": "0;1;0;1", "aff_unique_norm": "University of Southern California;Max Planck Institute for Intelligent Systems", "aff_unique_dep": ";Intelligent Systems", "aff_unique_url": "https://www.usc.edu;https://www.mpi-is.mpg.de", "aff_unique_abbr": "USC;MPI-IS", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Los Angeles;", "aff_country_unique_index": "0;1;0;1", "aff_country_unique": "United States;Germany" }, { "title": "Causality-aware counterfactual confounding adjustment as an alternative to linear residualization in anticausal prediction tasks based on linear learners", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9607", "id": "9607", "proceeding": "http://proceedings.mlr.press/v139/neto21a.html", "slides": "", "author": "Elias Chaibub Neto", "abstract": "Linear residualization is a common practice for confounding adjustment in machine learning applications. Recently, causality-aware predictive modeling has been proposed as an alternative causality-inspired approach for adjusting for confounders. In this paper, we compare the linear residualization approach against the causality-aware confounding adjustment in anticausal prediction tasks. Our comparisons include both the settings where the training and test sets come from the same distributions, as well as, when the training and test sets are shifted due to selection biases. In the absence of dataset shifts, we show that the causality-aware approach tends to (asymptotically) outperform the residualization adjustment in terms of predictive performance in linear learners. Importantly, our results still hold even when the true model generating the data is not linear. We illustrate our results in both regression and classification tasks. Furthermore, in the presence of dataset shifts in the joint distribution of the confounders and outcome variables, we show that the causality-aware approach is more stable than linear residualization.", "bibtex": "@InProceedings{pmlr-v139-neto21a,\n title = \t {Causality-aware counterfactual confounding adjustment as an alternative to linear residualization in anticausal prediction tasks based on linear learners},\n author = {Neto, Elias Chaibub},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8034--8044},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/neto21a/neto21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/neto21a.html},\n abstract = \t {Linear residualization is a common practice for confounding adjustment in machine learning applications. 
Recently, causality-aware predictive modeling has been proposed as an alternative causality-inspired approach for adjusting for confounders. In this paper, we compare the linear residualization approach against the causality-aware confounding adjustment in anticausal prediction tasks. Our comparisons include both the settings where the training and test sets come from the same distributions, as well as, when the training and test sets are shifted due to selection biases. In the absence of dataset shifts, we show that the causality-aware approach tends to (asymptotically) outperform the residualization adjustment in terms of predictive performance in linear learners. Importantly, our results still hold even when the true model generating the data is not linear. We illustrate our results in both regression and classification tasks. Furthermore, in the presence of dataset shifts in the joint distribution of the confounders and outcome variables, we show that the causality-aware approach is more stable than linear residualization.}\n}", "pdf": "http://proceedings.mlr.press/v139/neto21a/neto21a.pdf", "supp": "", "pdf_size": 513502, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12437546450132086925&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "Sage Bionetworks, Seattle, United States", "aff_domain": "sagebase.org", "email": "sagebase.org", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v139/neto21a.html", "aff_unique_index": "0", "aff_unique_norm": "Sage Bionetworks", "aff_unique_dep": "", "aff_unique_url": "https://sagebionetworks.org", "aff_unique_abbr": "", "aff_campus_unique_index": "0", "aff_campus_unique": "Seattle", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "title": "ChaCha for Online AutoML", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10137", "id": "10137", "proceeding": "http://proceedings.mlr.press/v139/wu21d.html", "slides": "", "author_site": "Qingyun Wu, Chi Wang, John Langford, Paul Mineiro, Marco Rossi", "author": "Qingyun Wu; Chi Wang; John Langford; Paul Mineiro; Marco Rossi", "abstract": "We propose the ChaCha (Champion-Challengers) algorithm for making an online choice of hyperparameters in online learning settings. ChaCha handles the process of determining a champion and scheduling a set of \u2018live\u2019 challengers over time based on sample complexity bounds. It is guaranteed to have sublinear regret after the optimal configuration is added into consideration by an application-dependent oracle based on the champions. 
Empirically, we show that ChaCha provides good performance across a wide array of datasets when optimizing over featurization and hyperparameter decisions.", "bibtex": "@InProceedings{pmlr-v139-wu21d,\n title = \t {ChaCha for Online AutoML},\n author = {Wu, Qingyun and Wang, Chi and Langford, John and Mineiro, Paul and Rossi, Marco},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11263--11273},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wu21d/wu21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/wu21d.html},\n abstract = \t {We propose the ChaCha (Champion-Challengers) algorithm for making an online choice of hyperparameters in online learning settings. ChaCha handles the process of determining a champion and scheduling a set of \u2018live\u2019 challengers over time based on sample complexity bounds. It is guaranteed to have sublinear regret after the optimal configuration is added into consideration by an application-dependent oracle based on the champions. Empirically, we show that ChaCha provides good performance across a wide array of datasets when optimizing over featurization and hyperparameter decisions.}\n}", "pdf": "http://proceedings.mlr.press/v139/wu21d/wu21d.pdf", "supp": "", "pdf_size": 956778, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15774579199663385941&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Microsoft Research; Microsoft Research; Microsoft Research; Microsoft Research; Microsoft Research", "aff_domain": "psu.edu; ;microsoft.com; ; ", "email": "psu.edu; ;microsoft.com; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/wu21d.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Microsoft", "aff_unique_dep": "Microsoft Research", "aff_unique_url": "https://www.microsoft.com/en-us/research", "aff_unique_abbr": "MSR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Characterizing Fairness Over the Set of Good Models Under Selective Labels", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8471", "id": "8471", "proceeding": "http://proceedings.mlr.press/v139/coston21a.html", "slides": "/media/icml-2021/Slides/8471.pdf", "author_site": "Amanda Coston, Ashesh Rambachan, Alexandra Chouldechova", "author": "Amanda Coston; Ashesh Rambachan; Alexandra Chouldechova", "abstract": "Algorithmic risk assessments are used to inform decisions in a wide variety of high-stakes settings. Often multiple predictive models deliver similar overall performance but differ markedly in their predictions for individual cases, an empirical phenomenon known as the \u201cRashomon Effect.\u201d These models may have different properties over various groups, and therefore have different predictive fairness properties. We develop a framework for characterizing predictive fairness properties over the set of models that deliver similar overall performance, or \u201cthe set of good models.\u201d Our framework addresses the empirically relevant challenge of selectively labelled data in the setting where the selection decision and outcome are unconfounded given the observed data features. 
Our framework can be used to 1) audit for predictive bias; or 2) replace an existing model with one that has better fairness properties. We illustrate these use cases on a recidivism prediction task and a real-world credit-scoring task.", "bibtex": "@InProceedings{pmlr-v139-coston21a,\n title = \t {Characterizing Fairness Over the Set of Good Models Under Selective Labels},\n author = {Coston, Amanda and Rambachan, Ashesh and Chouldechova, Alexandra},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2144--2155},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/coston21a/coston21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/coston21a.html},\n abstract = \t {Algorithmic risk assessments are used to inform decisions in a wide variety of high-stakes settings. Often multiple predictive models deliver similar overall performance but differ markedly in their predictions for individual cases, an empirical phenomenon known as the \u201cRashomon Effect.\u201d These models may have different properties over various groups, and therefore have different predictive fairness properties. We develop a framework for characterizing predictive fairness properties over the set of models that deliver similar overall performance, or \u201cthe set of good models.\u201d Our framework addresses the empirically relevant challenge of selectively labelled data in the setting where the selection decision and outcome are unconfounded given the observed data features. Our framework can be used to 1) audit for predictive bias; or 2) replace an existing model with one that has better fairness properties. 
We illustrate these use cases on a recidivism prediction task and a real-world credit-scoring task.}\n}", "pdf": "http://proceedings.mlr.press/v139/coston21a/coston21a.pdf", "supp": "", "pdf_size": 423928, "gs_citation": 103, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10938276634822622965&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Heinz College and Machine Learning Department, Carnegie Mellon University; Department of Economics, Harvard University; Heinz College, Carnegie Mellon University", "aff_domain": "andrew.cmu.edu;g.harvard.edu; ", "email": "andrew.cmu.edu;g.harvard.edu; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/coston21a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Carnegie Mellon University;Harvard University", "aff_unique_dep": "Heinz College and Machine Learning Department;Department of Economics", "aff_unique_url": "https://www.cmu.edu;https://www.harvard.edu", "aff_unique_abbr": "CMU;Harvard", "aff_campus_unique_index": "1", "aff_campus_unique": ";Cambridge", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Characterizing Structural Regularities of Labeled Data in Overparameterized Models", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10133", "id": "10133", "proceeding": "http://proceedings.mlr.press/v139/jiang21k.html", "slides": "/media/icml-2021/Slides/10133.pdf", "author_site": "Ziheng Jiang, Chiyuan Zhang, Kunal Talwar, Michael Mozer", "author": "Ziheng Jiang; Chiyuan Zhang; Kunal Talwar; Michael C Mozer", "abstract": "Humans are accustomed to environments that contain both regularities and exceptions. For example, at most gas stations, one pays prior to pumping, but the occasional rural station does not accept payment in advance. Likewise, deep neural networks can generalize across instances that share common patterns or structures, yet have the capacity to memorize rare or irregular forms. We analyze how individual instances are treated by a model via a consistency score. The score characterizes the expected accuracy for a held-out instance given training sets of varying size sampled from the data distribution. We obtain empirical estimates of this score for individual instances in multiple data sets, and we show that the score identifies out-of-distribution and mislabeled examples at one end of the continuum and strongly regular examples at the other end. We identify computationally inexpensive proxies to the consistency score using statistics collected during training. We apply the score toward understanding the dynamics of representation learning and to filter outliers during training.", "bibtex": "@InProceedings{pmlr-v139-jiang21k,\n title = \t {Characterizing Structural Regularities of Labeled Data in Overparameterized Models},\n author = {Jiang, Ziheng and Zhang, Chiyuan and Talwar, Kunal and Mozer, Michael C},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5034--5044},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jiang21k/jiang21k.pdf},\n url = \t {https://proceedings.mlr.press/v139/jiang21k.html},\n abstract = \t {Humans are accustomed to environments that contain both regularities and exceptions. 
For example, at most gas stations, one pays prior to pumping, but the occasional rural station does not accept payment in advance. Likewise, deep neural networks can generalize across instances that share common patterns or structures, yet have the capacity to memorize rare or irregular forms. We analyze how individual instances are treated by a model via a consistency score. The score characterizes the expected accuracy for a held-out instance given training sets of varying size sampled from the data distribution. We obtain empirical estimates of this score for individual instances in multiple data sets, and we show that the score identifies out-of-distribution and mislabeled examples at one end of the continuum and strongly regular examples at the other end. We identify computationally inexpensive proxies to the consistency score using statistics collected during training. We apply the score toward understanding the dynamics of representation learning and to filter outliers during training.}\n}", "pdf": "http://proceedings.mlr.press/v139/jiang21k/jiang21k.pdf", "supp": "", "pdf_size": 8019774, "gs_citation": 119, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8265006175243503341&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Paul G. Allen School of Computer Science, University of Washington, Seattle, WA, USA + OctoML.ai, Seattle, WA, USA + Google; Google Research, Brain Team, Mountain View, CA, USA + Apple Inc., Cupertino, CA, USA; Google Research, Brain Team, Mountain View, CA, USA + Apple Inc., Cupertino, CA, USA; Department of Computer Science, University of Colorado Boulder, Boulder, CO, USA", "aff_domain": "uw.edu;google.com;apple.com;colorado.edu", "email": "uw.edu;google.com;apple.com;colorado.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/jiang21k.html", "aff_unique_index": "0+1+2;2+3;2+3;4", "aff_unique_norm": "University of Washington;OctoML.ai;Google;Apple;University of Colorado Boulder", "aff_unique_dep": "Paul G. Allen School of Computer Science;;Google;Apple Inc.;Department of Computer Science", "aff_unique_url": "https://www.washington.edu;;https://www.google.com;https://www.apple.com;https://www.colorado.edu", "aff_unique_abbr": "UW;;Google;Apple;CU Boulder", "aff_campus_unique_index": "0+0+1;1+2;1+2;3", "aff_campus_unique": "Seattle;Mountain View;Cupertino;Boulder", "aff_country_unique_index": "0+0+0;0+0;0+0;0", "aff_country_unique": "United States" }, { "title": "Characterizing the Gap Between Actor-Critic and Policy Gradient", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9711", "id": "9711", "proceeding": "http://proceedings.mlr.press/v139/wen21b.html", "slides": "", "author_site": "Junfeng Wen, Saurabh Kumar, Ramki Gummadi, Dale Schuurmans", "author": "Junfeng Wen; Saurabh Kumar; Ramki Gummadi; Dale Schuurmans", "abstract": "Actor-critic (AC) methods are ubiquitous in reinforcement learning. Although it is understood that AC methods are closely related to policy gradient (PG), their precise connection has not been fully characterized previously. In this paper, we explain the gap between AC and PG methods by identifying the exact adjustment to the AC objective/gradient that recovers the true policy gradient of the cumulative reward objective (PG). 
Furthermore, by viewing the AC method as a two-player Stackelberg game between the actor and critic, we show that the Stackelberg policy gradient can be recovered as a special case of our more general analysis. Based on these results, we develop practical algorithms, Residual Actor-Critic and Stackelberg Actor-Critic, for estimating the correction between AC and PG and use these to modify the standard AC algorithm. Experiments on popular tabular and continuous environments show the proposed corrections can improve both the sample efficiency and final performance of existing AC methods.", "bibtex": "@InProceedings{pmlr-v139-wen21b,\n title = \t {Characterizing the Gap Between Actor-Critic and Policy Gradient},\n author = {Wen, Junfeng and Kumar, Saurabh and Gummadi, Ramki and Schuurmans, Dale},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11101--11111},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wen21b/wen21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/wen21b.html},\n abstract = \t {Actor-critic (AC) methods are ubiquitous in reinforcement learning. Although it is understood that AC methods are closely related to policy gradient (PG), their precise connection has not been fully characterized previously. In this paper, we explain the gap between AC and PG methods by identifying the exact adjustment to the AC objective/gradient that recovers the true policy gradient of the cumulative reward objective (PG). Furthermore, by viewing the AC method as a two-player Stackelberg game between the actor and critic, we show that the Stackelberg policy gradient can be recovered as a special case of our more general analysis. Based on these results, we develop practical algorithms, Residual Actor-Critic and Stackelberg Actor-Critic, for estimating the correction between AC and PG and use these to modify the standard AC algorithm. 
Experiments on popular tabular and continuous environments show the proposed corrections can improve both the sample efficiency and final performance of existing AC methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/wen21b/wen21b.pdf", "supp": "", "pdf_size": 2272376, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5948270474707197620&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Computing Science, University of Alberta; Stanford University; Google Brain; Department of Computing Science, University of Alberta + Google Brain", "aff_domain": "gmail.com; ; ; ", "email": "gmail.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/wen21b.html", "aff_unique_index": "0;1;2;0+2", "aff_unique_norm": "University of Alberta;Stanford University;Google", "aff_unique_dep": "Department of Computing Science;;Google Brain", "aff_unique_url": "https://www.ualberta.ca;https://www.stanford.edu;https://brain.google.com", "aff_unique_abbr": "UAlberta;Stanford;Google Brain", "aff_campus_unique_index": "1;2;2", "aff_campus_unique": ";Stanford;Mountain View", "aff_country_unique_index": "0;1;1;0+1", "aff_country_unique": "Canada;United States" }, { "title": "Chebyshev Polynomial Codes: Task Entanglement-based Coding for Distributed Matrix Multiplication", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9255", "id": "9255", "proceeding": "http://proceedings.mlr.press/v139/hong21b.html", "slides": "", "author_site": "Sangwoo Hong, Heecheol Yang, Youngseok Yoon, Tae Hyun Cho, Jungwoo Lee", "author": "Sangwoo Hong; Heecheol Yang; Youngseok Yoon; Taehyun Cho; Jungwoo Lee", "abstract": "Distributed computing has been a prominent solution to efficiently process massive datasets in parallel. However, the existence of stragglers is one of the major concerns that slows down the overall speed of distributed computing. To deal with this problem, we consider a distributed matrix multiplication scenario where a master assigns multiple tasks to each worker to exploit stragglers\u2019 computing ability (which is typically wasted in conventional distributed computing). We propose Chebyshev polynomial codes, which can achieve order-wise improvement in encoding complexity at the master and communication load in distributed matrix multiplication using task entanglement. The key idea of task entanglement is to reduce the number of encoded matrices for multiple tasks assigned to each worker by intertwining encoded matrices. 
We experimentally demonstrate that, in cloud environments, Chebyshev polynomial codes can provide significant reduction in overall processing time in distributed computing for matrix multiplication, which is a key computational component in modern deep learning.", "bibtex": "@InProceedings{pmlr-v139-hong21b,\n title = \t {Chebyshev Polynomial Codes: Task Entanglement-based Coding for Distributed Matrix Multiplication},\n author = {Hong, Sangwoo and Yang, Heecheol and Yoon, Youngseok and Cho, Taehyun and Lee, Jungwoo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4319--4327},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hong21b/hong21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/hong21b.html},\n abstract = \t {Distributed computing has been a prominent solution to efficiently process massive datasets in parallel. However, the existence of stragglers is one of the major concerns that slows down the overall speed of distributed computing. To deal with this problem, we consider a distributed matrix multiplication scenario where a master assigns multiple tasks to each worker to exploit stragglers\u2019 computing ability (which is typically wasted in conventional distributed computing). We propose Chebyshev polynomial codes, which can achieve order-wise improvement in encoding complexity at the master and communication load in distributed matrix multiplication using task entanglement. The key idea of task entanglement is to reduce the number of encoded matrices for multiple tasks assigned to each worker by intertwining encoded matrices. 
We experimentally demonstrate that, in cloud environments, Chebyshev polynomial codes can provide significant reduction in overall processing time in distributed computing for matrix multiplication, which is a key computational component in modern deep learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/hong21b/hong21b.pdf", "supp": "", "pdf_size": 1367446, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17271092537103909645&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "Communications and Machine Learning Lab., Department of Electrical and Computer Engineering, Seoul National University, Seoul, 08826, South Korea; Division of Computer Convergence, Chungnam National University, Daejeon, 34134, South Korea + Communications and Machine Learning Lab., Department of Electrical and Computer Engineering, Seoul National University, Seoul, 08826, South Korea; Communications and Machine Learning Lab., Department of Electrical and Computer Engineering, Seoul National University, Seoul, 08826, South Korea; Communications and Machine Learning Lab., Department of Electrical and Computer Engineering, Seoul National University, Seoul, 08826, South Korea; Communications and Machine Learning Lab., Department of Electrical and Computer Engineering, Seoul National University, Seoul, 08826, South Korea", "aff_domain": "snu.ac.kr;cnu.ac.kr;snu.ac.kr;snu.ac.kr;snu.ac.kr", "email": "snu.ac.kr;cnu.ac.kr;snu.ac.kr;snu.ac.kr;snu.ac.kr", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/hong21b.html", "aff_unique_index": "0;1+0;0;0;0", "aff_unique_norm": "Seoul National University;Chungnam National University", "aff_unique_dep": "Department of Electrical and Computer Engineering;Division of Computer Convergence", "aff_unique_url": "https://www.snu.ac.kr;http://www.cnu.ac.kr", "aff_unique_abbr": "SNU;CNU", "aff_campus_unique_index": "0;1+0;0;0;0", "aff_campus_unique": "Seoul;Daejeon", "aff_country_unique_index": "0;0+0;0;0;0", "aff_country_unique": "South Korea" }, { "title": "Class2Simi: A Noise Reduction Perspective on Learning with Noisy Labels", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9641", "id": "9641", "proceeding": "http://proceedings.mlr.press/v139/wu21f.html", "slides": "", "author_site": "Songhua Wu, Xiaobo Xia, Tongliang Liu, Bo Han, Mingming Gong, Nannan Wang, Haifeng Liu, Gang Niu", "author": "Songhua Wu; Xiaobo Xia; Tongliang Liu; Bo Han; Mingming Gong; Nannan Wang; Haifeng Liu; Gang Niu", "abstract": "Learning with noisy labels has attracted a lot of attention in recent years, where the mainstream approaches are in \\emph{pointwise} manners. Meanwhile, \\emph{pairwise} manners have shown great potential in supervised metric learning and unsupervised contrastive learning. Thus, a natural question is raised: does learning in a pairwise manner \\emph{mitigate} label noise? To give an affirmative answer, in this paper, we propose a framework called \\emph{Class2Simi}: it transforms data points with noisy \\emph{class labels} to data pairs with noisy \\emph{similarity labels}, where a similarity label denotes whether a pair shares the class label or not. Through this transformation, the \\emph{reduction of the noise rate} is theoretically guaranteed, and hence it is in principle easier to handle noisy similarity labels. Amazingly, DNNs that predict the \\emph{clean} class labels can be trained from noisy data pairs if they are first pretrained from noisy data points. 
Class2Simi is \\emph{computationally efficient} because not only this transformation is on-the-fly in mini-batches, but also it just changes loss computation on top of model prediction into a pairwise manner. Its effectiveness is verified by extensive experiments.", "bibtex": "@InProceedings{pmlr-v139-wu21f,\n title = \t {Class2Simi: A Noise Reduction Perspective on Learning with Noisy Labels},\n author = {Wu, Songhua and Xia, Xiaobo and Liu, Tongliang and Han, Bo and Gong, Mingming and Wang, Nannan and Liu, Haifeng and Niu, Gang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11285--11295},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wu21f/wu21f.pdf},\n url = \t {https://proceedings.mlr.press/v139/wu21f.html},\n abstract = \t {Learning with noisy labels has attracted a lot of attention in recent years, where the mainstream approaches are in \\emph{pointwise} manners. Meanwhile, \\emph{pairwise} manners have shown great potential in supervised metric learning and unsupervised contrastive learning. Thus, a natural question is raised: does learning in a pairwise manner \\emph{mitigate} label noise? To give an affirmative answer, in this paper, we propose a framework called \\emph{Class2Simi}: it transforms data points with noisy \\emph{class labels} to data pairs with noisy \\emph{similarity labels}, where a similarity label denotes whether a pair shares the class label or not. Through this transformation, the \\emph{reduction of the noise rate} is theoretically guaranteed, and hence it is in principle easier to handle noisy similarity labels. Amazingly, DNNs that predict the \\emph{clean} class labels can be trained from noisy data pairs if they are first pretrained from noisy data points. Class2Simi is \\emph{computationally efficient} because not only this transformation is on-the-fly in mini-batches, but also it just changes loss computation on top of model prediction into a pairwise manner. 
Its effectiveness is verified by extensive experiments.}\n}", "pdf": "http://proceedings.mlr.press/v139/wu21f/wu21f.pdf", "supp": "", "pdf_size": 1018120, "gs_citation": 82, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13919468432483521067&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Trustworthy Machine Learning Lab, School of Computer Science, The University of Sydney; Trustworthy Machine Learning Lab, School of Computer Science, The University of Sydney; Trustworthy Machine Learning Lab, School of Computer Science, The University of Sydney; Department of Computer Science, Hong Kong Baptist University; School of Mathematics and Statistics, The University of Melbourne; ISN State Key Laboratory, School of Telecommunications Engineering, Xidian University; Brain-Inspired Technology Co., Ltd.; RIKEN Center for Advanced Intelligence Project", "aff_domain": "sydney.edu.au;sydney.edu.au;sydney.edu.au;hkbu.edu.hk;unimelb.edu.au;xidian.edu.cn;brain-tech.com;riken.jp", "email": "sydney.edu.au;sydney.edu.au;sydney.edu.au;hkbu.edu.hk;unimelb.edu.au;xidian.edu.cn;brain-tech.com;riken.jp", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/wu21f.html", "aff_unique_index": "0;0;0;1;2;3;4;5", "aff_unique_norm": "University of Sydney;Hong Kong Baptist University;University of Melbourne;Xidian University;Brain-Inspired Technology;RIKEN", "aff_unique_dep": "School of Computer Science;Department of Computer Science;School of Mathematics and Statistics;School of Telecommunications Engineering;Technology;Center for Advanced Intelligence Project", "aff_unique_url": "https://www.sydney.edu.au;https://www.hkbu.edu.hk;https://www.unimelb.edu.au;http://www.xidian.edu.cn/;;https://www.riken.jp/en/", "aff_unique_abbr": "USYD;HKBU;UniMelb;Xidian;;RIKEN", "aff_campus_unique_index": "1;2", "aff_campus_unique": ";Hong Kong SAR;Melbourne", "aff_country_unique_index": "0;0;0;1;0;1;1;2", "aff_country_unique": "Australia;China;Japan" }, { "title": "Classification with Rejection Based on Cost-sensitive Classification", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10499", "id": "10499", "proceeding": "http://proceedings.mlr.press/v139/charoenphakdee21a.html", "slides": "/media/icml-2021/Slides/10499.pdf", "author_site": "Nontawat Charoenphakdee, Zhenghang Cui, Yivan Zhang, Masashi Sugiyama", "author": "Nontawat Charoenphakdee; Zhenghang Cui; Yivan Zhang; Masashi Sugiyama", "abstract": "The goal of classification with rejection is to avoid risky misclassification in error-critical applications such as medical diagnosis and product inspection. In this paper, based on the relationship between classification with rejection and cost-sensitive classification, we propose a novel method of classification with rejection by learning an ensemble of cost-sensitive classifiers, which satisfies all the following properties: (i) it can avoid estimating class-posterior probabilities, resulting in improved classification accuracy. (ii) it allows a flexible choice of losses including non-convex ones, (iii) it does not require complicated modifications when using different losses, (iv) it is applicable to both binary and multiclass cases, and (v) it is theoretically justifiable for any classification-calibrated loss. 
Experimental results demonstrate the usefulness of our proposed approach in clean-labeled, noisy-labeled, and positive-unlabeled classification.", "bibtex": "@InProceedings{pmlr-v139-charoenphakdee21a,\n title = \t {Classification with Rejection Based on Cost-sensitive Classification},\n author = {Charoenphakdee, Nontawat and Cui, Zhenghang and Zhang, Yivan and Sugiyama, Masashi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1507--1517},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/charoenphakdee21a/charoenphakdee21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/charoenphakdee21a.html},\n abstract = \t {The goal of classification with rejection is to avoid risky misclassification in error-critical applications such as medical diagnosis and product inspection. In this paper, based on the relationship between classification with rejection and cost-sensitive classification, we propose a novel method of classification with rejection by learning an ensemble of cost-sensitive classifiers, which satisfies all the following properties: (i) it can avoid estimating class-posterior probabilities, resulting in improved classification accuracy. (ii) it allows a flexible choice of losses including non-convex ones, (iii) it does not require complicated modifications when using different losses, (iv) it is applicable to both binary and multiclass cases, and (v) it is theoretically justifiable for any classification-calibrated loss. Experimental results demonstrate the usefulness of our proposed approach in clean-labeled, noisy-labeled, and positive-unlabeled classification.}\n}", "pdf": "http://proceedings.mlr.press/v139/charoenphakdee21a/charoenphakdee21a.pdf", "supp": "", "pdf_size": 1874795, "gs_citation": 107, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3688807985860107524&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "The University of Tokyo, Tokyo, Japan+RIKEN AIP, Tokyo, Japan; The University of Tokyo, Tokyo, Japan+RIKEN AIP, Tokyo, Japan; The University of Tokyo, Tokyo, Japan+RIKEN AIP, Tokyo, Japan; The University of Tokyo, Tokyo, Japan+RIKEN AIP, Tokyo, Japan", "aff_domain": "ms.k.u-tokyo.ac.jp; ; ; ", "email": "ms.k.u-tokyo.ac.jp; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/charoenphakdee21a.html", "aff_unique_index": "0+1;0+1;0+1;0+1", "aff_unique_norm": "University of Tokyo;RIKEN AIP", "aff_unique_dep": ";", "aff_unique_url": "https://www.u-tokyo.ac.jp;https://aip.Riken.jp", "aff_unique_abbr": "UTokyo;RIKEN AIP", "aff_campus_unique_index": "0+0;0+0;0+0;0+0", "aff_campus_unique": "Tokyo", "aff_country_unique_index": "0+0;0+0;0+0;0+0", "aff_country_unique": "Japan" }, { "title": "Classifying high-dimensional Gaussian mixtures: Where kernel methods fail and neural networks succeed", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8535", "id": "8535", "proceeding": "http://proceedings.mlr.press/v139/refinetti21b.html", "slides": "", "author_site": "Maria Refinetti, Sebastian Goldt, FLORENT KRZAKALA, Lenka Zdeborova", "author": "Maria Refinetti; Sebastian Goldt; Florent Krzakala; Lenka Zdeborova", "abstract": "A recent series of theoretical works showed that the dynamics of neural networks with a certain initialisation are 
well-captured by kernel methods. Concurrent empirical work demonstrated that kernel methods can come close to the performance of neural networks on some image classification tasks. These results raise the question of whether neural networks only learn successfully if kernels also learn successfully, despite being the more expressive function class. Here, we show that two-layer neural networks with *only a few neurons* achieve near-optimal performance on high-dimensional Gaussian mixture classification while lazy training approaches such as random features and kernel methods do not. Our analysis is based on the derivation of a set of ordinary differential equations that exactly track the dynamics of the network and thus allow to extract the asymptotic performance of the network as a function of regularisation or signal-to-noise ratio. We also show how over-parametrising the neural network leads to faster convergence, but does not improve its final performance.", "bibtex": "@InProceedings{pmlr-v139-refinetti21b,\n title = \t {Classifying high-dimensional Gaussian mixtures: Where kernel methods fail and neural networks succeed},\n author = {Refinetti, Maria and Goldt, Sebastian and Krzakala, Florent and Zdeborova, Lenka},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8936--8947},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/refinetti21b/refinetti21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/refinetti21b.html},\n abstract = \t {A recent series of theoretical works showed that the dynamics of neural networks with a certain initialisation are well-captured by kernel methods. Concurrent empirical work demonstrated that kernel methods can come close to the performance of neural networks on some image classification tasks. These results raise the question of whether neural networks only learn successfully if kernels also learn successfully, despite being the more expressive function class. Here, we show that two-layer neural networks with *only a few neurons* achieve near-optimal performance on high-dimensional Gaussian mixture classification while lazy training approaches such as random features and kernel methods do not. Our analysis is based on the derivation of a set of ordinary differential equations that exactly track the dynamics of the network and thus allow to extract the asymptotic performance of the network as a function of regularisation or signal-to-noise ratio. 
We also show how over-parametrising the neural network leads to faster convergence, but does not improve its final performance.}\n}", "pdf": "http://proceedings.mlr.press/v139/refinetti21b/refinetti21b.pdf", "supp": "", "pdf_size": 3181665, "gs_citation": 105, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2175676811548405487&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Laboratoire de Physique de l\u2019\u00c9cole Normale Sup\u00e9rieure, Universit\u00e9 PSL, CNRS, Sorbonne Universit\u00e9, Universit\u00e9 Paris-Diderot, Sorbonne Paris Cit\u00e9+IdePHICS Lab, EPFL; International School of Advanced Studies (SISSA), Trieste, Italy; IdePHICS Lab, EPFL+SPOC Lab, EPFL; SPOC Lab, EPFL", "aff_domain": "ens.fr; ; ; ", "email": "ens.fr; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/refinetti21b.html", "aff_unique_index": "0+1;2;1+1;1", "aff_unique_norm": "\u00c9cole Normale Sup\u00e9rieure;EPFL;International School of Advanced Studies", "aff_unique_dep": "Laboratoire de Physique;IdePHICS Lab;", "aff_unique_url": "https://www.ens.fr;https://www.epfl.ch;https://www.sissa.it", "aff_unique_abbr": "ENS;EPFL;SISSA", "aff_campus_unique_index": ";1;", "aff_campus_unique": ";Trieste", "aff_country_unique_index": "0+1;2;1+1;1", "aff_country_unique": "France;Switzerland;Italy" }, { "title": "Clusterability as an Alternative to Anchor Points When Learning with Noisy Labels", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10625", "id": "10625", "proceeding": "http://proceedings.mlr.press/v139/zhu21e.html", "slides": "/media/icml-2021/Slides/10625.pdf", "author_site": "Zhaowei Zhu, Yiwen Song, Yang Liu", "author": "Zhaowei Zhu; Yiwen Song; Yang Liu", "abstract": "The label noise transition matrix, characterizing the probabilities of a training instance being wrongly annotated, is crucial to designing popular solutions to learning with noisy labels. Existing works heavily rely on finding \u201canchor points\u201d or their approximates, defined as instances belonging to a particular class almost surely. Nonetheless, finding anchor points remains a non-trivial task, and the estimation accuracy is also often throttled by the number of available anchor points. In this paper, we propose an alternative option to the above task. Our main contribution is the discovery of an efficient estimation procedure based on a clusterability condition. We prove that with clusterable representations of features, using up to third-order consensuses of noisy labels among neighbor representations is sufficient to estimate a unique transition matrix. Compared with methods using anchor points, our approach uses substantially more instances and benefits from a much better sample complexity. We demonstrate the estimation accuracy and advantages of our estimates using both synthetic noisy labels (on CIFAR-10/100) and real human-level noisy labels (on Clothing1M and our self-collected human-annotated CIFAR-10). 
Our code and human-level noisy CIFAR-10 labels are available at https://github.com/UCSC-REAL/HOC.", "bibtex": "@InProceedings{pmlr-v139-zhu21e,\n title = \t {Clusterability as an Alternative to Anchor Points When Learning with Noisy Labels},\n author = {Zhu, Zhaowei and Song, Yiwen and Liu, Yang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12912--12923},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhu21e/zhu21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhu21e.html},\n abstract = \t {The label noise transition matrix, characterizing the probabilities of a training instance being wrongly annotated, is crucial to designing popular solutions to learning with noisy labels. Existing works heavily rely on finding \u201canchor points\u201d or their approximates, defined as instances belonging to a particular class almost surely. Nonetheless, finding anchor points remains a non-trivial task, and the estimation accuracy is also often throttled by the number of available anchor points. In this paper, we propose an alternative option to the above task. Our main contribution is the discovery of an efficient estimation procedure based on a clusterability condition. We prove that with clusterable representations of features, using up to third-order consensuses of noisy labels among neighbor representations is sufficient to estimate a unique transition matrix. Compared with methods using anchor points, our approach uses substantially more instances and benefits from a much better sample complexity. We demonstrate the estimation accuracy and advantages of our estimates using both synthetic noisy labels (on CIFAR-10/100) and real human-level noisy labels (on Clothing1M and our self-collected human-annotated CIFAR-10). 
Our code and human-level noisy CIFAR-10 labels are available at https://github.com/UCSC-REAL/HOC.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhu21e/zhu21e.pdf", "supp": "", "pdf_size": 648456, "gs_citation": 111, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4688909084811815305&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science and Engineering, University of California, Santa Cruz, CA, USA; Beijing University of Posts and Telecommunications, Beijing, China; Department of Computer Science and Engineering, University of California, Santa Cruz, CA, USA", "aff_domain": "ucsc.edu;bupt.edu.cn;ucsc.edu", "email": "ucsc.edu;bupt.edu.cn;ucsc.edu", "github": "https://github.com/UCSC-REAL/HOC", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zhu21e.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of California, Santa Cruz;Beijing University of Posts and Telecommunications", "aff_unique_dep": "Department of Computer Science and Engineering;", "aff_unique_url": "https://www.ucsc.edu;http://www.bupt.edu.cn/", "aff_unique_abbr": "UCSC;BUPT", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Santa Cruz;Beijing", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United States;China" }, { "title": "Clustered Sampling: Low-Variance and Improved Representativity for Clients Selection in Federated Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10543", "id": "10543", "proceeding": "http://proceedings.mlr.press/v139/fraboni21a.html", "slides": "", "author_site": "Yann Fraboni, Richard Vidal, Laetitia Kameni, Marco Lorenzi", "author": "Yann Fraboni; Richard Vidal; Laetitia Kameni; Marco Lorenzi", "abstract": "This work addresses the problem of optimizing communications between server and clients in federated learning (FL). Current sampling approaches in FL are either biased, or non optimal in terms of server-clients communications and training stability. To overcome this issue, we introduce clustered sampling for clients selection. We prove that clustered sampling leads to better clients representatitivity and to reduced variance of the clients stochastic aggregation weights in FL. Compatibly with our theory, we provide two different clustering approaches enabling clients aggregation based on 1) sample size, and 2) models similarity. Through a series of experiments in non-iid and unbalanced scenarios, we demonstrate that model aggregation through clustered sampling consistently leads to better training convergence and variability when compared to standard sampling approaches. Our approach does not require any additional operation on the clients side, and can be seamlessly integrated in standard FL implementations. 
Finally, clustered sampling is compatible with existing methods and technologies for privacy enhancement, and for communication reduction through model compression.", "bibtex": "@InProceedings{pmlr-v139-fraboni21a,\n title = \t {Clustered Sampling: Low-Variance and Improved Representativity for Clients Selection in Federated Learning},\n author = {Fraboni, Yann and Vidal, Richard and Kameni, Laetitia and Lorenzi, Marco},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3407--3416},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fraboni21a/fraboni21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/fraboni21a.html},\n abstract = \t {This work addresses the problem of optimizing communications between server and clients in federated learning (FL). Current sampling approaches in FL are either biased, or non optimal in terms of server-clients communications and training stability. To overcome this issue, we introduce clustered sampling for clients selection. We prove that clustered sampling leads to better clients representatitivity and to reduced variance of the clients stochastic aggregation weights in FL. Compatibly with our theory, we provide two different clustering approaches enabling clients aggregation based on 1) sample size, and 2) models similarity. Through a series of experiments in non-iid and unbalanced scenarios, we demonstrate that model aggregation through clustered sampling consistently leads to better training convergence and variability when compared to standard sampling approaches. Our approach does not require any additional operation on the clients side, and can be seamlessly integrated in standard FL implementations. 
Finally, clustered sampling is compatible with existing methods and technologies for privacy enhancement, and for communication reduction through model compression.}\n}", "pdf": "http://proceedings.mlr.press/v139/fraboni21a/fraboni21a.pdf", "supp": "", "pdf_size": 506477, "gs_citation": 255, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1617025297400599136&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Universit\u00e9 C\u00f4te d\u2019Azur, Inria, Epione Research Group, France + Accenture Labs, Sophia Antipolis, France; Accenture Labs, Sophia Antipolis, France; Accenture Labs, Sophia Antipolis, France; Universit\u00e9 C\u00f4te d\u2019Azur, Inria, Epione Research Group, France", "aff_domain": "inria.fr; ; ; ", "email": "inria.fr; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/fraboni21a.html", "aff_unique_index": "0+1;1;1;0", "aff_unique_norm": "Universit\u00e9 C\u00f4te d\u2019Azur;Accenture Labs", "aff_unique_dep": "Inria, Epione Research Group;", "aff_unique_url": "https://www.univ-cotedazur.fr;https://labs.accenture.com", "aff_unique_abbr": "UCA;", "aff_campus_unique_index": "1;1;1", "aff_campus_unique": ";Sophia Antipolis", "aff_country_unique_index": "0+0;0;0;0", "aff_country_unique": "France" }, { "title": "Coach-Player Multi-agent Reinforcement Learning for Dynamic Team Composition", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9723", "id": "9723", "proceeding": "http://proceedings.mlr.press/v139/liu21m.html", "slides": "/media/icml-2021/Slides/9723.pdf", "author_site": "Bo Liu, Qiang Liu, Peter Stone, Animesh Garg, Yuke Zhu, Anima Anandkumar", "author": "Bo Liu; Qiang Liu; Peter Stone; Animesh Garg; Yuke Zhu; Anima Anandkumar", "abstract": "In real-world multi-agent systems, agents with different capabilities may join or leave without altering the team\u2019s overarching goals. Coordinating teams with such dynamic composition is challenging: the optimal team strategy varies with the composition. We propose COPA, a coach-player framework to tackle this problem. We assume the coach has a global view of the environment and coordinates the players, who only have partial views, by distributing individual strategies. Specifically, we 1) adopt the attention mechanism for both the coach and the players; 2) propose a variational objective to regularize learning; and 3) design an adaptive communication method to let the coach decide when to communicate with the players. We validate our methods on a resource collection task, a rescue game, and the StarCraft micromanagement tasks. We demonstrate zero-shot generalization to new team compositions. Our method achieves comparable or better performance than the setting where all players have a full view of the environment. 
Moreover, we see that the performance remains high even when the coach communicates as little as 13% of the time using the adaptive communication strategy.", "bibtex": "@InProceedings{pmlr-v139-liu21m,\n title = \t {Coach-Player Multi-agent Reinforcement Learning for Dynamic Team Composition},\n author = {Liu, Bo and Liu, Qiang and Stone, Peter and Garg, Animesh and Zhu, Yuke and Anandkumar, Anima},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6860--6870},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21m/liu21m.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21m.html},\n abstract = \t {In real-world multi-agent systems, agents with different capabilities may join or leave without altering the team\u2019s overarching goals. Coordinating teams with such dynamic composition is challenging: the optimal team strategy varies with the composition. We propose COPA, a coach-player framework to tackle this problem. We assume the coach has a global view of the environment and coordinates the players, who only have partial views, by distributing individual strategies. Specifically, we 1) adopt the attention mechanism for both the coach and the players; 2) propose a variational objective to regularize learning; and 3) design an adaptive communication method to let the coach decide when to communicate with the players. We validate our methods on a resource collection task, a rescue game, and the StarCraft micromanagement tasks. We demonstrate zero-shot generalization to new team compositions. Our method achieves comparable or better performance than the setting where all players have a full view of the environment. 
Moreover, we see that the performance remains high even when the coach communicates as little as 13% of the time using the adaptive communication strategy.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21m/liu21m.pdf", "supp": "", "pdf_size": 2360965, "gs_citation": 62, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16222834590436839078&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Department of Computer Science, The University of Texas at Austin, Austin, USA; Department of Computer Science, The University of Texas at Austin, Austin, USA; Department of Computer Science, The University of Texas at Austin, Austin, USA; University of Toronto, Toronto, Canada + Nvidia; Department of Computer Science, The University of Texas at Austin, Austin, USA + Nvidia; Nvidia + California Institute of Technology, Pasadena, USA", "aff_domain": "cs.utexas.edu; ; ; ; ; ", "email": "cs.utexas.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/liu21m.html", "aff_unique_index": "0;0;0;1+2;0+2;2+3", "aff_unique_norm": "University of Texas at Austin;University of Toronto;NVIDIA;California Institute of Technology", "aff_unique_dep": "Department of Computer Science;;NVIDIA Corporation;", "aff_unique_url": "https://www.utexas.edu;https://www.utoronto.ca;https://www.nvidia.com;https://www.caltech.edu", "aff_unique_abbr": "UT Austin;U of T;NVIDIA;Caltech", "aff_campus_unique_index": "0;0;0;1;0;3", "aff_campus_unique": "Austin;Toronto;;Pasadena", "aff_country_unique_index": "0;0;0;1+0;0+0;0+0", "aff_country_unique": "United States;Canada" }, { "title": "Coded-InvNet for Resilient Prediction Serving Systems", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8601", "id": "8601", "proceeding": "http://proceedings.mlr.press/v139/dinh21a.html", "slides": "", "author_site": "Tuan Dinh, Kangwook Lee", "author": "Tuan Dinh; Kangwook Lee", "abstract": "Inspired by a new coded computation algorithm for invertible functions, we propose Coded-InvNet a new approach to design resilient prediction serving systems that can gracefully handle stragglers or node failures. Coded-InvNet leverages recent findings in the deep learning literature such as invertible neural networks, Manifold Mixup, and domain translation algorithms, identifying interesting research directions that span across machine learning and systems. Our experimental results show that Coded-InvNet can outperform existing approaches, especially when the compute resource overhead is as low as 10%. 
For instance, without knowing which of the ten workers is going to fail, our algorithm can design a backup task so that it can correctly recover the missing prediction result with an accuracy of 85.9%, significantly outperforming the previous SOTA by 32.5%.", "bibtex": "@InProceedings{pmlr-v139-dinh21a,\n title = \t {Coded-InvNet for Resilient Prediction Serving Systems},\n author = {Dinh, Tuan and Lee, Kangwook},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2749--2759},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/dinh21a/dinh21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/dinh21a.html},\n abstract = \t {Inspired by a new coded computation algorithm for invertible functions, we propose Coded-InvNet a new approach to design resilient prediction serving systems that can gracefully handle stragglers or node failures. Coded-InvNet leverages recent findings in the deep learning literature such as invertible neural networks, Manifold Mixup, and domain translation algorithms, identifying interesting research directions that span across machine learning and systems. Our experimental results show that Coded-InvNet can outperform existing approaches, especially when the compute resource overhead is as low as 10%. For instance, without knowing which of the ten workers is going to fail, our algorithm can design a backup task so that it can correctly recover the missing prediction result with an accuracy of 85.9%, significantly outperforming the previous SOTA by 32.5%.}\n}", "pdf": "http://proceedings.mlr.press/v139/dinh21a/dinh21a.pdf", "supp": "", "pdf_size": 2046007, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2494675469783280507&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Computer Sciences, University of Wisconsin-Madison, Madison, USA; Department of Electrical and Computer Engineering, University of Wisconsin-Madison, Madison, USA", "aff_domain": "wisc.edu;wisc.edu", "email": "wisc.edu;wisc.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/dinh21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Wisconsin-Madison", "aff_unique_dep": "Department of Computer Sciences", "aff_unique_url": "https://www.wisc.edu", "aff_unique_abbr": "UW-Madison", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Madison", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Collaborative Bayesian Optimization with Fair Regret", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9459", "id": "9459", "proceeding": "http://proceedings.mlr.press/v139/sim21b.html", "slides": "", "author_site": "Rachael Hwee Ling Sim, Yehong Zhang, Bryan Kian Hsiang Low, Patrick Jaillet", "author": "Rachael Hwee Ling Sim; Yehong Zhang; Bryan Kian Hsiang Low; Patrick Jaillet", "abstract": "Bayesian optimization (BO) is a popular tool for optimizing complex and costly-to-evaluate black-box objective functions. To further reduce the number of function evaluations, any party performing BO may be interested to collaborate with others to optimize the same objective function concurrently. 
To do this, existing BO algorithms have considered optimizing a batch of input queries in parallel and provided theoretical bounds on their cumulative regret reflecting inefficiency. However, when the objective function values are correlated with real-world rewards (e.g., money), parties may be hesitant to collaborate if they risk incurring larger cumulative regret (i.e., smaller real-world reward) than others. This paper shows that fairness and efficiency are both necessary for the collaborative BO setting. Inspired by social welfare concepts from economics, we propose a new notion of regret capturing these properties and a collaborative BO algorithm whose convergence rate can be theoretically guaranteed by bounding the new regret, both of which share an adjustable parameter for trading off between fairness vs. efficiency. We empirically demonstrate the benefits (e.g., increased fairness) of our algorithm using synthetic and real-world datasets.", "bibtex": "@InProceedings{pmlr-v139-sim21b,\n title = \t {Collaborative Bayesian Optimization with Fair Regret},\n author = {Sim, Rachael Hwee Ling and Zhang, Yehong and Low, Bryan Kian Hsiang and Jaillet, Patrick},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9691--9701},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sim21b/sim21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/sim21b.html},\n abstract = \t {Bayesian optimization (BO) is a popular tool for optimizing complex and costly-to-evaluate black-box objective functions. To further reduce the number of function evaluations, any party performing BO may be interested to collaborate with others to optimize the same objective function concurrently. To do this, existing BO algorithms have considered optimizing a batch of input queries in parallel and provided theoretical bounds on their cumulative regret reflecting inefficiency. However, when the objective function values are correlated with real-world rewards (e.g., money), parties may be hesitant to collaborate if they risk incurring larger cumulative regret (i.e., smaller real-world reward) than others. This paper shows that fairness and efficiency are both necessary for the collaborative BO setting. Inspired by social welfare concepts from economics, we propose a new notion of regret capturing these properties and a collaborative BO algorithm whose convergence rate can be theoretically guaranteed by bounding the new regret, both of which share an adjustable parameter for trading off between fairness vs. efficiency. 
We empirically demonstrate the benefits (e.g., increased fairness) of our algorithm using synthetic and real-world datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/sim21b/sim21b.pdf", "supp": "", "pdf_size": 3870906, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14353319151725704401&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science, National University of Singapore, Republic of Singapore; Peng Cheng Laboratory, People\u2019s Republic of China; Department of Computer Science, National University of Singapore, Republic of Singapore + Department of Electrical Engineering and Computer Science, Massachusetts Institute of Technology, USA; Department of Electrical Engineering and Computer Science, Massachusetts Institute of Technology, USA", "aff_domain": "nus.edu.sg;pcl.ac.cn;comp.nus.edu.sg;mit.edu", "email": "nus.edu.sg;pcl.ac.cn;comp.nus.edu.sg;mit.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/sim21b.html", "aff_unique_index": "0;1;0+2;2", "aff_unique_norm": "National University of Singapore;Pengcheng Laboratory;Massachusetts Institute of Technology", "aff_unique_dep": "Department of Computer Science;Peng Cheng Laboratory;Department of Electrical Engineering and Computer Science", "aff_unique_url": "https://www.nus.edu.sg;;https://web.mit.edu", "aff_unique_abbr": "NUS;;MIT", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Cambridge", "aff_country_unique_index": "0;1;0+2;2", "aff_country_unique": "Singapore;China;United States" }, { "title": "CombOptNet: Fit the Right NP-Hard Problem by Learning Integer Programming Constraints", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9555", "id": "9555", "proceeding": "http://proceedings.mlr.press/v139/paulus21a.html", "slides": "/media/icml-2021/Slides/9555.pdf", "author_site": "Anselm Paulus, Michal Rolinek, Vit Musil, Brandon Amos, Georg Martius", "author": "Anselm Paulus; Michal Rolinek; Vit Musil; Brandon Amos; Georg Martius", "abstract": "Bridging logical and algorithmic reasoning with modern machine learning techniques is a fundamental challenge with potentially transformative impact. On the algorithmic side, many NP-hard problems can be expressed as integer programs, in which the constraints play the role of their \u2019combinatorial specification\u2019. In this work, we aim to integrate integer programming solvers into neural network architectures as layers capable of learning both the cost terms and the constraints. The resulting end-to-end trainable architectures jointly extract features from raw data and solve a suitable (learned) combinatorial problem with state-of-the-art integer programming solvers. 
We demonstrate the potential of such layers with an extensive performance analysis on synthetic data and with a demonstration on a competitive computer vision keypoint matching benchmark.", "bibtex": "@InProceedings{pmlr-v139-paulus21a,\n title = \t {CombOptNet: Fit the Right NP-Hard Problem by Learning Integer Programming Constraints},\n author = {Paulus, Anselm and Rolinek, Michal and Musil, Vit and Amos, Brandon and Martius, Georg},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8443--8453},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/paulus21a/paulus21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/paulus21a.html},\n abstract = \t {Bridging logical and algorithmic reasoning with modern machine learning techniques is a fundamental challenge with potentially transformative impact. On the algorithmic side, many NP-hard problems can be expressed as integer programs, in which the constraints play the role of their \u2019combinatorial specification\u2019. In this work, we aim to integrate integer programming solvers into neural network architectures as layers capable of learning both the cost terms and the constraints. The resulting end-to-end trainable architectures jointly extract features from raw data and solve a suitable (learned) combinatorial problem with state-of-the-art integer programming solvers. We demonstrate the potential of such layers with an extensive performance analysis on synthetic data and with a demonstration on a competitive computer vision keypoint matching benchmark.}\n}", "pdf": "http://proceedings.mlr.press/v139/paulus21a/paulus21a.pdf", "supp": "", "pdf_size": 2281827, "gs_citation": 87, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13237034191144507355&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Max-Planck-Institute for Intelligent Systems, T\u00fcbingen, Germany; Max-Planck-Institute for Intelligent Systems, T\u00fcbingen, Germany; Masaryk University, Brno, Czechia; Facebook AI Research, USA; Max-Planck-Institute for Intelligent Systems, T\u00fcbingen, Germany", "aff_domain": "tuebingen.mpg.de;tuebingen.mpg.de;masaryk.cz;fb.com;tuebingen.mpg.de", "email": "tuebingen.mpg.de;tuebingen.mpg.de;masaryk.cz;fb.com;tuebingen.mpg.de", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/paulus21a.html", "aff_unique_index": "0;0;1;2;0", "aff_unique_norm": "Max-Planck-Institute for Intelligent Systems;Masaryk University;Meta", "aff_unique_dep": ";;Facebook AI Research", "aff_unique_url": "https://www.mpi-is.mpg.de;https://www.muni.cz;https://research.facebook.com", "aff_unique_abbr": "MPI-IS;MU;FAIR", "aff_campus_unique_index": "0;0;1;0", "aff_campus_unique": "T\u00fcbingen;Brno;", "aff_country_unique_index": "0;0;1;2;0", "aff_country_unique": "Germany;Czechia;United States" }, { "title": "Combinatorial Blocking Bandits with Stochastic Delays", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10487", "id": "10487", "proceeding": "http://proceedings.mlr.press/v139/atsidakou21a.html", "slides": "", "author_site": "Alexia Atsidakou, Orestis Papadigenopoulos, Soumya Basu, Constantine Caramanis, Sanjay Shakkottai", "author": "Alexia Atsidakou; Orestis Papadigenopoulos; Soumya Basu; Constantine Caramanis; Sanjay 
Shakkottai", "abstract": "Recent work has considered natural variations of the {\\em multi-armed bandit} problem, where the reward distribution of each arm is a special function of the time passed since its last pulling. In this direction, a simple (yet widely applicable) model is that of {\\em blocking bandits}, where an arm becomes unavailable for a deterministic number of rounds after each play. In this work, we extend the above model in two directions: (i) We consider the general combinatorial setting where more than one arms can be played at each round, subject to feasibility constraints. (ii) We allow the blocking time of each arm to be stochastic. We first study the computational/unconditional hardness of the above setting and identify the necessary conditions for the problem to become tractable (even in an approximate sense). Based on these conditions, we provide a tight analysis of the approximation guarantee of a natural greedy heuristic that always plays the maximum expected reward feasible subset among the available (non-blocked) arms. When the arms\u2019 expected rewards are unknown, we adapt the above heuristic into a bandit algorithm, based on UCB, for which we provide sublinear (approximate) regret guarantees, matching the theoretical lower bounds in the limiting case of absence of delays.", "bibtex": "@InProceedings{pmlr-v139-atsidakou21a,\n title = \t {Combinatorial Blocking Bandits with Stochastic Delays},\n author = {Atsidakou, Alexia and Papadigenopoulos, Orestis and Basu, Soumya and Caramanis, Constantine and Shakkottai, Sanjay},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {404--413},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/atsidakou21a/atsidakou21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/atsidakou21a.html},\n abstract = \t {Recent work has considered natural variations of the {\\em multi-armed bandit} problem, where the reward distribution of each arm is a special function of the time passed since its last pulling. In this direction, a simple (yet widely applicable) model is that of {\\em blocking bandits}, where an arm becomes unavailable for a deterministic number of rounds after each play. In this work, we extend the above model in two directions: (i) We consider the general combinatorial setting where more than one arms can be played at each round, subject to feasibility constraints. (ii) We allow the blocking time of each arm to be stochastic. We first study the computational/unconditional hardness of the above setting and identify the necessary conditions for the problem to become tractable (even in an approximate sense). Based on these conditions, we provide a tight analysis of the approximation guarantee of a natural greedy heuristic that always plays the maximum expected reward feasible subset among the available (non-blocked) arms. 
When the arms\u2019 expected rewards are unknown, we adapt the above heuristic into a bandit algorithm, based on UCB, for which we provide sublinear (approximate) regret guarantees, matching the theoretical lower bounds in the limiting case of absence of delays.}\n}", "pdf": "http://proceedings.mlr.press/v139/atsidakou21a/atsidakou21a.pdf", "supp": "", "pdf_size": 605427, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15794464336620405955&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Electrical and Computer Engineering, The University of Texas at Austin, USA; Department of Computer Science, The University of Texas at Austin, USA; Google, Mountain View, USA; Department of Electrical and Computer Engineering, The University of Texas at Austin, USA; Department of Electrical and Computer Engineering, The University of Texas at Austin, USA", "aff_domain": "utexas.edu;cs.utexas.edu; ;utexas.edu;utexas.edu", "email": "utexas.edu;cs.utexas.edu; ;utexas.edu;utexas.edu", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/atsidakou21a.html", "aff_unique_index": "0;0;1;0;0", "aff_unique_norm": "University of Texas at Austin;Google", "aff_unique_dep": "Department of Electrical and Computer Engineering;Google", "aff_unique_url": "https://www.utexas.edu;https://www.google.com", "aff_unique_abbr": "UT Austin;Google", "aff_campus_unique_index": "0;0;1;0;0", "aff_campus_unique": "Austin;Mountain View", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Combining Pessimism with Optimism for Robust and Efficient Model-Based Deep Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9877", "id": "9877", "proceeding": "http://proceedings.mlr.press/v139/curi21a.html", "slides": "/media/icml-2021/Slides/9877.pdf", "author_site": "Sebastian Curi, Ilija Bogunovic, Andreas Krause", "author": "Sebastian Curi; Ilija Bogunovic; Andreas Krause", "abstract": "In real-world tasks, reinforcement learning (RL) agents frequently encounter situations that are not present during training time. To ensure reliable performance, the RL agents need to exhibit robustness to such worst-case situations. The robust-RL framework addresses this challenge via a minimax optimization between an agent and an adversary. Previous robust RL algorithms are either sample inefficient, lack robustness guarantees, or do not scale to large problems. We propose the Robust Hallucinated Upper-Confidence RL (RH-UCRL) algorithm to provably solve this problem while attaining near-optimal sample complexity guarantees. RH-UCRL is a model-based reinforcement learning (MBRL) algorithm that effectively distinguishes between epistemic and aleatoric uncertainty and efficiently explores both the agent and the adversary decision spaces during policy learning. We scale RH-UCRL to complex tasks via neural networks ensemble models as well as neural network policies. 
Experimentally we demonstrate that RH-UCRL outperforms other robust deep RL algorithms in a variety of adversarial environments.", "bibtex": "@InProceedings{pmlr-v139-curi21a,\n title = \t {Combining Pessimism with Optimism for Robust and Efficient Model-Based Deep Reinforcement Learning},\n author = {Curi, Sebastian and Bogunovic, Ilija and Krause, Andreas},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2254--2264},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/curi21a/curi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/curi21a.html},\n abstract = \t {In real-world tasks, reinforcement learning (RL) agents frequently encounter situations that are not present during training time. To ensure reliable performance, the RL agents need to exhibit robustness to such worst-case situations. The robust-RL framework addresses this challenge via a minimax optimization between an agent and an adversary. Previous robust RL algorithms are either sample inefficient, lack robustness guarantees, or do not scale to large problems. We propose the Robust Hallucinated Upper-Confidence RL (RH-UCRL) algorithm to provably solve this problem while attaining near-optimal sample complexity guarantees. RH-UCRL is a model-based reinforcement learning (MBRL) algorithm that effectively distinguishes between epistemic and aleatoric uncertainty and efficiently explores both the agent and the adversary decision spaces during policy learning. We scale RH-UCRL to complex tasks via neural networks ensemble models as well as neural network policies. Experimentally we demonstrate that RH-UCRL outperforms other robust deep RL algorithms in a variety of adversarial environments.}\n}", "pdf": "http://proceedings.mlr.press/v139/curi21a/curi21a.pdf", "supp": "", "pdf_size": 0, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13138049233560516090&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/curi21a.html" }, { "title": "Communication-Efficient Distributed Optimization with Quantized Preconditioners", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8611", "id": "8611", "proceeding": "http://proceedings.mlr.press/v139/alimisis21a.html", "slides": "", "author_site": "Foivos Alimisis, Peter Davies, Dan Alistarh", "author": "Foivos Alimisis; Peter Davies; Dan Alistarh", "abstract": "We investigate fast and communication-efficient algorithms for the classic problem of minimizing a sum of strongly convex and smooth functions that are distributed among $n$ different nodes, which can communicate using a limited number of bits. Most previous communication-efficient approaches for this problem are limited to first-order optimization, and therefore have \\emph{linear} dependence on the condition number in their communication complexity. We show that this dependence is not inherent: communication-efficient methods can in fact have sublinear dependence on the condition number. For this, we design and analyze the first communication-efficient distributed variants of preconditioned gradient descent for Generalized Linear Models, and for Newton\u2019s method. 
Our results rely on a new technique for quantizing both the preconditioner and the descent direction at each step of the algorithms, while controlling their convergence rate. We also validate our findings experimentally, showing faster convergence and reduced communication relative to previous methods.", "bibtex": "@InProceedings{pmlr-v139-alimisis21a,\n title = \t {Communication-Efficient Distributed Optimization with Quantized Preconditioners},\n author = {Alimisis, Foivos and Davies, Peter and Alistarh, Dan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {196--206},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/alimisis21a/alimisis21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/alimisis21a.html},\n abstract = \t {We investigate fast and communication-efficient algorithms for the classic problem of minimizing a sum of strongly convex and smooth functions that are distributed among $n$ different nodes, which can communicate using a limited number of bits. Most previous communication-efficient approaches for this problem are limited to first-order optimization, and therefore have \\emph{linear} dependence on the condition number in their communication complexity. We show that this dependence is not inherent: communication-efficient methods can in fact have sublinear dependence on the condition number. For this, we design and analyze the first communication-efficient distributed variants of preconditioned gradient descent for Generalized Linear Models, and for Newton\u2019s method. Our results rely on a new technique for quantizing both the preconditioner and the descent direction at each step of the algorithms, while controlling their convergence rate. 
We also validate our findings experimentally, showing faster convergence and reduced communication relative to previous methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/alimisis21a/alimisis21a.pdf", "supp": "", "pdf_size": 429087, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2429704257928189444&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Mathematics, University of Geneva, Switzerland (work done while at IST Austria); IST Austria; IST Austria + Neural Magic, US", "aff_domain": "unige.ch; ; ", "email": "unige.ch; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/alimisis21a.html", "aff_unique_index": "0;1;1+2", "aff_unique_norm": "University of Geneva;Institute of Science and Technology Austria;Neural Magic", "aff_unique_dep": "Department of Mathematics;;", "aff_unique_url": "https://www.unige.ch;https://www.ist.ac.at;", "aff_unique_abbr": "UniGE;IST Austria;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;1+2", "aff_country_unique": "Switzerland;Austria;United States" }, { "title": "Communication-Efficient Distributed SVD via Local Power Iterations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8659", "id": "8659", "proceeding": "http://proceedings.mlr.press/v139/li21u.html", "slides": "", "author_site": "Xiang Li, Shusen Wang, Kun Chen, Zhihua Zhang", "author": "Xiang Li; Shusen Wang; Kun Chen; Zhihua Zhang", "abstract": "We study distributed computing of the truncated singular value decomposition (SVD). We develop an algorithm that we call \\texttt{LocalPower} for improving communication efficiency. Specifically, we uniformly partition the dataset among $m$ nodes and alternate between multiple (precisely $p$) local power iterations and one global aggregation. In the aggregation, we propose to weight each local eigenvector matrix with orthogonal Procrustes transformation (OPT). As a practical surrogate of OPT, sign-fixing, which uses a diagonal matrix with $\\pm 1$ entries as weights, has better computation complexity and stability in experiments. We theoretically show that under certain assumptions \\texttt{LocalPower} lowers the required number of communications by a factor of $p$ to reach a constant accuracy. We also show that the strategy of periodically decaying $p$ helps obtain high-precision solutions. We conduct experiments to demonstrate the effectiveness of \\texttt{LocalPower}.", "bibtex": "@InProceedings{pmlr-v139-li21u,\n title = \t {Communication-Efficient Distributed SVD via Local Power Iterations},\n author = {Li, Xiang and Wang, Shusen and Chen, Kun and Zhang, Zhihua},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6504--6514},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21u/li21u.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21u.html},\n abstract = \t {We study distributed computing of the truncated singular value decomposition (SVD). We develop an algorithm that we call \\texttt{LocalPower} for improving communication efficiency. Specifically, we uniformly partition the dataset among $m$ nodes and alternate between multiple (precisely $p$) local power iterations and one global aggregation. 
In the aggregation, we propose to weight each local eigenvector matrix with orthogonal Procrustes transformation (OPT). As a practical surrogate of OPT, sign-fixing, which uses a diagonal matrix with $\\pm 1$ entries as weights, has better computation complexity and stability in experiments. We theoretically show that under certain assumptions \\texttt{LocalPower} lowers the required number of communications by a factor of $p$ to reach a constant accuracy. We also show that the strategy of periodically decaying $p$ helps obtain high-precision solutions. We conduct experiments to demonstrate the effectiveness of \\texttt{LocalPower}.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21u/li21u.pdf", "supp": "", "pdf_size": 658686, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1741371435444323515&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "School of Mathematical Sciences, Peking University, China; Department of Computer Science, Stevens Institute of Technology, USA; School of Mathematical Sciences, Peking University, China; School of Mathematical Sciences, Peking University, China", "aff_domain": "pku.edu.cn; ; ; ", "email": "pku.edu.cn; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/li21u.html", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "Peking University;Stevens Institute of Technology", "aff_unique_dep": "School of Mathematical Sciences;Department of Computer Science", "aff_unique_url": "http://www.pku.edu.cn;https://www.stevens.edu", "aff_unique_abbr": "PKU;SIT", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0;0", "aff_country_unique": "China;United States" }, { "title": "Commutative Lie Group VAE for Disentanglement Learning", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8905", "id": "8905", "proceeding": "http://proceedings.mlr.press/v139/zhu21f.html", "slides": "/media/icml-2021/Slides/8905.pdf", "author_site": "Xinqi Zhu, Chang Xu, Dacheng Tao", "author": "Xinqi Zhu; Chang Xu; Dacheng Tao", "abstract": "We view disentanglement learning as discovering an underlying structure that equivariantly reflects the factorized variations shown in data. Traditionally, such a structure is fixed to be a vector space with data variations represented by translations along individual latent dimensions. We argue this simple structure is suboptimal since it requires the model to learn to discard the properties (e.g. different scales of changes, different levels of abstractness) of data variations, which is an extra work than equivariance learning. Instead, we propose to encode the data variations with groups, a structure not only can equivariantly represent variations, but can also be adaptively optimized to preserve the properties of data variations. Considering it is hard to conduct training on group structures, we focus on Lie groups and adopt a parameterization using Lie algebra. Based on the parameterization, some disentanglement learning constraints are naturally derived. A simple model named Commutative Lie Group VAE is introduced to realize the group-based disentanglement learning. 
Experiments show that our model can effectively learn disentangled representations without supervision, and can achieve state-of-the-art performance without extra constraints.", "bibtex": "@InProceedings{pmlr-v139-zhu21f,\n title = \t {Commutative Lie Group VAE for Disentanglement Learning},\n author = {Zhu, Xinqi and Xu, Chang and Tao, Dacheng},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12924--12934},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhu21f/zhu21f.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhu21f.html},\n abstract = \t {We view disentanglement learning as discovering an underlying structure that equivariantly reflects the factorized variations shown in data. Traditionally, such a structure is fixed to be a vector space with data variations represented by translations along individual latent dimensions. We argue this simple structure is suboptimal since it requires the model to learn to discard the properties (e.g. different scales of changes, different levels of abstractness) of data variations, which is an extra work than equivariance learning. Instead, we propose to encode the data variations with groups, a structure not only can equivariantly represent variations, but can also be adaptively optimized to preserve the properties of data variations. Considering it is hard to conduct training on group structures, we focus on Lie groups and adopt a parameterization using Lie algebra. Based on the parameterization, some disentanglement learning constraints are naturally derived. A simple model named Commutative Lie Group VAE is introduced to realize the group-based disentanglement learning. 
Experiments show that our model can effectively learn disentangled representations without supervision, and can achieve state-of-the-art performance without extra constraints.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhu21f/zhu21f.pdf", "supp": "", "pdf_size": 3934206, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13512230477271020552&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "School of Computer Science, Faculty of Engineering, The University of Sydney, Australia+JD Explore Academy, JD.com, China; School of Computer Science, Faculty of Engineering, The University of Sydney, Australia; JD Explore Academy, JD.com, China", "aff_domain": "uni.sydney.edu.au; ; ", "email": "uni.sydney.edu.au; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zhu21f.html", "aff_unique_index": "0+1;0;1", "aff_unique_norm": "University of Sydney;JD.com", "aff_unique_dep": "School of Computer Science;JD Explore Academy", "aff_unique_url": "https://www.sydney.edu.au;https://www.jd.com", "aff_unique_abbr": "USYD;JD", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+1;0;1", "aff_country_unique": "Australia;China" }, { "title": "Composed Fine-Tuning: Freezing Pre-Trained Denoising Autoencoders for Improved Generalization", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10589", "id": "10589", "proceeding": "http://proceedings.mlr.press/v139/xie21f.html", "slides": "/media/icml-2021/Slides/10589.pdf", "author_site": "Sang Michael Xie, Tengyu Ma, Percy Liang", "author": "Sang Michael Xie; Tengyu Ma; Percy Liang", "abstract": "We focus on prediction problems with structured outputs that are subject to output validity constraints, e.g. pseudocode-to-code translation where the code must compile. While labeled input-output pairs are expensive to obtain, \"unlabeled\" outputs, i.e. outputs without corresponding inputs, are freely available (e.g. code on GitHub) and provide information about output validity. Pre-training captures this structure by training a denoiser to denoise corrupted versions of unlabeled outputs. We first show that standard fine-tuning after pre-training destroys some of this structure. We then propose composed fine-tuning, which trains a predictor composed with the pre-trained denoiser. Importantly, the denoiser is fixed to preserve output structure. Like standard fine-tuning, the predictor is also initialized with the pre-trained denoiser. We prove for two-layer ReLU networks that composed fine-tuning significantly reduces the complexity of the predictor, thus improving generalization. Empirically, we show that composed fine-tuning improves over standard fine-tuning on two pseudocode-to-code translation datasets (3% and 6% relative). 
The improvement is magnified on out-of-distribution (OOD) examples (4% and 25% relative), suggesting that reducing predictor complexity improves OOD extrapolation.", "bibtex": "@InProceedings{pmlr-v139-xie21f,\n title = \t {Composed Fine-Tuning: Freezing Pre-Trained Denoising Autoencoders for Improved Generalization},\n author = {Xie, Sang Michael and Ma, Tengyu and Liang, Percy},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11424--11435},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xie21f/xie21f.pdf},\n url = \t {https://proceedings.mlr.press/v139/xie21f.html},\n abstract = \t {We focus on prediction problems with structured outputs that are subject to output validity constraints, e.g. pseudocode-to-code translation where the code must compile. While labeled input-output pairs are expensive to obtain, \"unlabeled\" outputs, i.e. outputs without corresponding inputs, are freely available (e.g. code on GitHub) and provide information about output validity. Pre-training captures this structure by training a denoiser to denoise corrupted versions of unlabeled outputs. We first show that standard fine-tuning after pre-training destroys some of this structure. We then propose composed fine-tuning, which trains a predictor composed with the pre-trained denoiser. Importantly, the denoiser is fixed to preserve output structure. Like standard fine-tuning, the predictor is also initialized with the pre-trained denoiser. We prove for two-layer ReLU networks that composed fine-tuning significantly reduces the complexity of the predictor, thus improving generalization. Empirically, we show that composed fine-tuning improves over standard fine-tuning on two pseudocode-to-code translation datasets (3% and 6% relative). 
The improvement is magnified on out-of-distribution (OOD) examples (4% and 25% relative), suggesting that reducing predictor complexity improves OOD extrapolation.}\n}", "pdf": "http://proceedings.mlr.press/v139/xie21f/xie21f.pdf", "supp": "", "pdf_size": 2873414, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10069903863251883068&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Department of Computer Science, Stanford University", "aff_domain": "cs.stanford.edu; ; ", "email": "cs.stanford.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/xie21f.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Composing Normalizing Flows for Inverse Problems", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9461", "id": "9461", "proceeding": "http://proceedings.mlr.press/v139/whang21b.html", "slides": "", "author_site": "Jay Whang, Erik Lindgren, Alexandros Dimakis", "author": "Jay Whang; Erik Lindgren; Alex Dimakis", "abstract": "Given an inverse problem with a normalizing flow prior, we wish to estimate the distribution of the underlying signal conditioned on the observations. We approach this problem as a task of conditional inference on the pre-trained unconditional flow model. We first establish that this is computationally hard for a large class of flow models. Motivated by this, we propose a framework for approximate inference that estimates the target conditional as a composition of two flow models. This formulation leads to a stable variational inference training procedure that avoids adversarial training. Our method is evaluated on a variety of inverse problems and is shown to produce high-quality samples with uncertainty quantification. We further demonstrate that our approach can be amortized for zero-shot inference.", "bibtex": "@InProceedings{pmlr-v139-whang21b,\n title = \t {Composing Normalizing Flows for Inverse Problems},\n author = {Whang, Jay and Lindgren, Erik and Dimakis, Alex},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11158--11169},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/whang21b/whang21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/whang21b.html},\n abstract = \t {Given an inverse problem with a normalizing flow prior, we wish to estimate the distribution of the underlying signal conditioned on the observations. We approach this problem as a task of conditional inference on the pre-trained unconditional flow model. We first establish that this is computationally hard for a large class of flow models. Motivated by this, we propose a framework for approximate inference that estimates the target conditional as a composition of two flow models. This formulation leads to a stable variational inference training procedure that avoids adversarial training. 
Our method is evaluated on a variety of inverse problems and is shown to produce high-quality samples with uncertainty quantification. We further demonstrate that our approach can be amortized for zero-shot inference.}\n}", "pdf": "http://proceedings.mlr.press/v139/whang21b/whang21b.pdf", "supp": "", "pdf_size": 2185191, "gs_citation": 63, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13855583865406383128&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Dept. of Computer Science, UT Austin, TX, USA; Google Research, NY, USA; Dept. of Electrical and Computer Engineering, UT Austin, TX, USA", "aff_domain": "utexas.edu; ; ", "email": "utexas.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/whang21b.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Texas at Austin;Google", "aff_unique_dep": "Department of Computer Science;Google Research", "aff_unique_url": "https://www.utexas.edu;https://research.google", "aff_unique_abbr": "UT Austin;Google", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Austin;New York", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Compositional Video Synthesis with Action Graphs", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10449", "id": "10449", "proceeding": "http://proceedings.mlr.press/v139/bar21a.html", "slides": "", "author_site": "Amir Bar, Roi Herzig, Xiaolong Wang, Anna Rohrbach, Gal Chechik, Trevor Darrell, Amir Globerson", "author": "Amir Bar; Roei Herzig; Xiaolong Wang; Anna Rohrbach; Gal Chechik; Trevor Darrell; Amir Globerson", "abstract": "Videos of actions are complex signals containing rich compositional structure in space and time. Current video generation methods lack the ability to condition the generation on multiple coordinated and potentially simultaneous timed actions. To address this challenge, we propose to represent the actions in a graph structure called Action Graph and present the new \"Action Graph To Video\" synthesis task. Our generative model for this task (AG2Vid) disentangles motion and appearance features, and by incorporating a scheduling mechanism for actions facilitates a timely and coordinated video generation. We train and evaluate AG2Vid on CATER and Something-Something V2 datasets, which results in videos that have better visual quality and semantic consistency compared to baselines. Finally, our model demonstrates zero-shot abilities by synthesizing novel compositions of the learned actions.", "bibtex": "@InProceedings{pmlr-v139-bar21a,\n title = \t {Compositional Video Synthesis with Action Graphs},\n author = {Bar, Amir and Herzig, Roei and Wang, Xiaolong and Rohrbach, Anna and Chechik, Gal and Darrell, Trevor and Globerson, Amir},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {662--673},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bar21a/bar21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bar21a.html},\n abstract = \t {Videos of actions are complex signals containing rich compositional structure in space and time. Current video generation methods lack the ability to condition the generation on multiple coordinated and potentially simultaneous timed actions. 
To address this challenge, we propose to represent the actions in a graph structure called Action Graph and present the new \"Action Graph To Video\" synthesis task. Our generative model for this task (AG2Vid) disentangles motion and appearance features, and by incorporating a scheduling mechanism for actions facilitates a timely and coordinated video generation. We train and evaluate AG2Vid on CATER and Something-Something V2 datasets, which results in videos that have better visual quality and semantic consistency compared to baselines. Finally, our model demonstrates zero-shot abilities by synthesizing novel compositions of the learned actions.}\n}", "pdf": "http://proceedings.mlr.press/v139/bar21a/bar21a.pdf", "supp": "", "pdf_size": 7648424, "gs_citation": 49, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=835836297893492143&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "The Blavatnik School of Computer Science, Tel Aviv University; The Blavatnik School of Computer Science, Tel Aviv University; UC San Diego; UC Berkeley; NVIDIA Research; UC Berkeley; The Blavatnik School of Computer Science, Tel Aviv University", "aff_domain": "cs.tau.ac.il; ; ; ; ; ; ", "email": "cs.tau.ac.il; ; ; ; ; ; ", "github": "", "project": "https://roeiherz.github.io/AG2Video", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/bar21a.html", "aff_unique_index": "0;0;1;2;3;2;0", "aff_unique_norm": "Tel Aviv University;University of California, San Diego;University of California, Berkeley;NVIDIA", "aff_unique_dep": "Blavatnik School of Computer Science;;;NVIDIA Research", "aff_unique_url": "https://www.tau.ac.il;https://www.ucsd.edu;https://www.berkeley.edu;https://www.nvidia.com/research", "aff_unique_abbr": "TAU;UCSD;UC Berkeley;NVIDIA", "aff_campus_unique_index": "0;0;1;2;2;0", "aff_campus_unique": "Tel Aviv;San Diego;Berkeley;", "aff_country_unique_index": "0;0;1;1;1;1;0", "aff_country_unique": "Israel;United States" }, { "title": "Compressed Maximum Likelihood", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8705", "id": "8705", "proceeding": "http://proceedings.mlr.press/v139/hao21c.html", "slides": "/media/icml-2021/Slides/8705.pdf", "author_site": "Yi Hao, Alon Orlitsky", "author": "Yi Hao; Alon Orlitsky", "abstract": "Maximum likelihood (ML) is one of the most fundamental and general statistical estimation techniques. Inspired by recent advances in estimating distribution functionals, we propose $\\textit{compressed maximum likelihood}$ (CML) that applies ML to the compressed samples. We then show that CML is sample-efficient for several essential learning tasks over both discrete and continuous domains, including learning densities with structures, estimating probability multisets, and inferring symmetric distribution functionals.", "bibtex": "@InProceedings{pmlr-v139-hao21c,\n title = \t {Compressed Maximum Likelihood},\n author = {Hao, Yi and Orlitsky, Alon},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4085--4095},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hao21c/hao21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/hao21c.html},\n abstract = \t {Maximum likelihood (ML) is one of the most fundamental and general statistical estimation techniques. 
Inspired by recent advances in estimating distribution functionals, we propose $\\textit{compressed maximum likelihood}$ (CML) that applies ML to the compressed samples. We then show that CML is sample-efficient for several essential learning tasks over both discrete and continuous domains, including learning densities with structures, estimating probability multisets, and inferring symmetric distribution functionals.}\n}", "pdf": "http://proceedings.mlr.press/v139/hao21c/hao21c.pdf", "supp": "", "pdf_size": 322139, "gs_citation": -1, "gs_cited_by_link": "", "gs_version_total": -1, "aff": "Department of Electrical and Computer Engineering, University of California, San Diego, USA; Department of Electrical and Computer Engineering, University of California, San Diego, USA", "aff_domain": "eng.ucsd.edu; ", "email": "eng.ucsd.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/hao21c.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, San Diego", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.ucsd.edu", "aff_unique_abbr": "UCSD", "aff_campus_unique_index": "0;0", "aff_campus_unique": "San Diego", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "ConViT: Improving Vision Transformers with Soft Convolutional Inductive Biases", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8525", "id": "8525", "proceeding": "http://proceedings.mlr.press/v139/d-ascoli21a.html", "slides": "", "author_site": "St\u00e9phane d'Ascoli, Hugo Touvron, Matthew Leavitt, Ari Morcos, Giulio Biroli, Levent Sagun", "author": "St\u00e9phane D\u2019Ascoli; Hugo Touvron; Matthew L Leavitt; Ari S Morcos; Giulio Biroli; Levent Sagun", "abstract": "Convolutional architectures have proven extremely successful for vision tasks. Their hard inductive biases enable sample-efficient learning, but come at the cost of a potentially lower performance ceiling. Vision Transformers (ViTs) rely on more flexible self-attention layers, and have recently outperformed CNNs for image classification. However, they require costly pre-training on large external datasets or distillation from pre-trained convolutional networks. In this paper, we ask the following question: is it possible to combine the strengths of these two architectures while avoiding their respective limitations? To this end, we introduce gated positional self-attention (GPSA), a form of positional self-attention which can be equipped with a \u201csoft\" convolutional inductive bias. We initialise the GPSA layers to mimic the locality of convolutional layers, then give each attention head the freedom to escape locality by adjusting a gating parameter regulating the attention paid to position versus content information. The resulting convolutional-like ViT architecture, ConViT, outperforms the DeiT on ImageNet, while offering a much improved sample efficiency. We further investigate the role of locality in learning by first quantifying how it is encouraged in vanilla self-attention layers, then analysing how it is escaped in GPSA layers. We conclude by presenting various ablations to better understand the success of the ConViT. 
Our code and models are released publicly at https://github.com/facebookresearch/convit.", "bibtex": "@InProceedings{pmlr-v139-d-ascoli21a,\n title = \t {ConViT: Improving Vision Transformers with Soft Convolutional Inductive Biases},\n author = {D'Ascoli, St{\\'e}phane and Touvron, Hugo and Leavitt, Matthew L and Morcos, Ari S and Biroli, Giulio and Sagun, Levent},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2286--2296},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/d-ascoli21a/d-ascoli21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/d-ascoli21a.html},\n abstract = \t {Convolutional architectures have proven extremely successful for vision tasks. Their hard inductive biases enable sample-efficient learning, but come at the cost of a potentially lower performance ceiling. Vision Transformers (ViTs) rely on more flexible self-attention layers, and have recently outperformed CNNs for image classification. However, they require costly pre-training on large external datasets or distillation from pre-trained convolutional networks. In this paper, we ask the following question: is it possible to combine the strengths of these two architectures while avoiding their respective limitations? To this end, we introduce gated positional self-attention (GPSA), a form of positional self-attention which can be equipped with a \u201csoft\" convolutional inductive bias. We initialise the GPSA layers to mimic the locality of convolutional layers, then give each attention head the freedom to escape locality by adjusting a gating parameter regulating the attention paid to position versus content information. The resulting convolutional-like ViT architecture, ConViT, outperforms the DeiT on ImageNet, while offering a much improved sample efficiency. We further investigate the role of locality in learning by first quantifying how it is encouraged in vanilla self-attention layers, then analysing how it is escaped in GPSA layers. We conclude by presenting various ablations to better understand the success of the ConViT. 
Our code and models are released publicly at https://github.com/facebookresearch/convit.}\n}", "pdf": "http://proceedings.mlr.press/v139/d-ascoli21a/d-ascoli21a.pdf", "supp": "", "pdf_size": 1061653, "gs_citation": 1025, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=817698272872287436&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Department of Physics, Ecole Normale Sup\u00e9rieure, Paris, France+Facebook AI Research, Paris, France; Facebook AI Research, Paris, France; Facebook AI Research, Paris, France; Facebook AI Research, Paris, France; Department of Physics, Ecole Normale Sup\u00e9rieure, Paris, France+Facebook AI Research, Paris, France; Facebook AI Research, Paris, France", "aff_domain": "ens.fr; ; ; ; ; ", "email": "ens.fr; ; ; ; ; ", "github": "https://github.com/facebookresearch/convit", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/d-ascoli21a.html", "aff_unique_index": "0+1;1;1;1;0+1;1", "aff_unique_norm": "Ecole Normale Sup\u00e9rieure;Meta", "aff_unique_dep": "Department of Physics;Facebook AI Research", "aff_unique_url": "https://www.ens.fr;https://research.facebook.com", "aff_unique_abbr": "ENS;FAIR", "aff_campus_unique_index": "0+0;0;0;0;0+0;0", "aff_campus_unique": "Paris", "aff_country_unique_index": "0+0;0;0;0;0+0;0", "aff_country_unique": "France" }, { "title": "Concentric mixtures of Mallows models for top-$k$ rankings: sampling and identifiability", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8509", "id": "8509", "proceeding": "http://proceedings.mlr.press/v139/collas21a.html", "slides": "", "author_site": "Fabien Collas, Ekhine IRUROZKI", "author": "Fabien Collas; Ekhine Irurozki", "abstract": "In this paper, we study mixtures of two Mallows models for top-$k$ rankings with equal location parameters but with different scale parameters (a mixture of concentric Mallows models). These models arise when we have a heterogeneous population of voters formed by two populations, one of which is a subpopulation of expert voters. We show the identifiability of both components and the learnability of their respective parameters. These results are based upon, first, bounding the sample complexity for the Borda algorithm with top-$k$ rankings. Second, we characterize the distances between rankings, showing that an off-the-shelf clustering algorithm separates the rankings by components with high probability, provided the scales are well-separated. As a by-product, we include an efficient sampling algorithm for Mallows top-$k$ rankings. 
Finally, since the rank aggregation will suffer from a large amount of noise introduced by the non-expert voters, we adapt the Borda algorithm to be able to recover the ground truth consensus ranking which is especially consistent with the expert rankings.", "bibtex": "@InProceedings{pmlr-v139-collas21a,\n title = \t {Concentric mixtures of Mallows models for top-$k$ rankings: sampling and identifiability},\n author = {Collas, Fabien and Irurozki, Ekhine},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2079--2088},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/collas21a/collas21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/collas21a.html},\n abstract = \t {In this paper, we study mixtures of two Mallows models for top-$k$ rankings with equal location parameters but with different scale parameters (a mixture of concentric Mallows models). These models arise when we have a heterogeneous population of voters formed by two populations, one of which is a subpopulation of expert voters. We show the identifiability of both components and the learnability of their respective parameters. These results are based upon, first, bounding the sample complexity for the Borda algorithm with top-$k$ rankings. Second, we characterize the distances between rankings, showing that an off-the-shelf clustering algorithm separates the rankings by components with high probability, provided the scales are well-separated. As a by-product, we include an efficient sampling algorithm for Mallows top-$k$ rankings. Finally, since the rank aggregation will suffer from a large amount of noise introduced by the non-expert voters, we adapt the Borda algorithm to be able to recover the ground truth consensus ranking which is especially consistent with the expert rankings.}\n}", "pdf": "http://proceedings.mlr.press/v139/collas21a/collas21a.pdf", "supp": "", "pdf_size": 476418, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12860162197354485802&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "Basque Center for Applied Mathematics, Bilbao, Spain; LTCI, Telecom Paris, Institut Polytechnique de Paris", "aff_domain": "bcamath.org;telecom-paris.fr", "email": "bcamath.org;telecom-paris.fr", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/collas21a.html", "aff_unique_index": "0;1", "aff_unique_norm": "Basque Center for Applied Mathematics;Telecom Paris", "aff_unique_dep": ";LTCI", "aff_unique_url": "https://www.bcamath.org/;https://www.telecom-paris.fr", "aff_unique_abbr": "BCAM;Telecom Paris", "aff_campus_unique_index": "0", "aff_campus_unique": "Bilbao;", "aff_country_unique_index": "0;1", "aff_country_unique": "Spain;France" }, { "title": "Conditional Distributional Treatment Effect with Kernel Conditional Mean Embeddings and U-Statistic Regression", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8681", "id": "8681", "proceeding": "http://proceedings.mlr.press/v139/park21c.html", "slides": "/media/icml-2021/Slides/8681.pdf", "author_site": "Junhyung Park, Uri Shalit, Bernhard Sch\u00f6lkopf, Krikamol Muandet", "author": "Junhyung Park; Uri Shalit; Bernhard Sch\u00f6lkopf; Krikamol Muandet", "abstract": "We propose to analyse the conditional distributional 
treatment effect (CoDiTE), which, in contrast to the more common conditional average treatment effect (CATE), is designed to encode a treatment\u2019s distributional aspects beyond the mean. We first introduce a formal definition of the CoDiTE associated with a distance function between probability measures. Then we discuss the CoDiTE associated with the maximum mean discrepancy via kernel conditional mean embeddings, which, coupled with a hypothesis test, tells us whether there is any conditional distributional effect of the treatment. Finally, we investigate what kind of conditional distributional effect the treatment has, both in an exploratory manner via the conditional witness function, and in a quantitative manner via U-statistic regression, generalising the CATE to higher-order moments. Experiments on synthetic, semi-synthetic and real datasets demonstrate the merits of our approach.", "bibtex": "@InProceedings{pmlr-v139-park21c,\n title = \t {Conditional Distributional Treatment Effect with Kernel Conditional Mean Embeddings and U-Statistic Regression},\n author = {Park, Junhyung and Shalit, Uri and Sch{\\\"o}lkopf, Bernhard and Muandet, Krikamol},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8401--8412},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/park21c/park21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/park21c.html},\n abstract = \t {We propose to analyse the conditional distributional treatment effect (CoDiTE), which, in contrast to the more common conditional average treatment effect (CATE), is designed to encode a treatment\u2019s distributional aspects beyond the mean. We first introduce a formal definition of the CoDiTE associated with a distance function between probability measures. Then we discuss the CoDiTE associated with the maximum mean discrepancy via kernel conditional mean embeddings, which, coupled with a hypothesis test, tells us whether there is any conditional distributional effect of the treatment. Finally, we investigate what kind of conditional distributional effect the treatment has, both in an exploratory manner via the conditional witness function, and in a quantitative manner via U-statistic regression, generalising the CATE to higher-order moments. 
Experiments on synthetic, semi-synthetic and real datasets demonstrate the merits of our approach.}\n}", "pdf": "http://proceedings.mlr.press/v139/park21c/park21c.pdf", "supp": "", "pdf_size": 548051, "gs_citation": 41, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12899216788410457863&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany; Technion, Israel Institute of Technology; Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany; Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany", "aff_domain": "tuebingen.mpg.de; ; ; ", "email": "tuebingen.mpg.de; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/park21c.html", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "Max Planck Institute for Intelligent Systems;Israel Institute of Technology", "aff_unique_dep": ";", "aff_unique_url": "https://www.mpi-is.mpg.de;https://www.technion.ac.il/en/", "aff_unique_abbr": "MPI-IS;Technion", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "T\u00fcbingen;", "aff_country_unique_index": "0;1;0;0", "aff_country_unique": "Germany;Israel" }, { "title": "Conditional Temporal Neural Processes with Covariance Loss", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10515", "id": "10515", "proceeding": "http://proceedings.mlr.press/v139/yoo21b.html", "slides": "/media/icml-2021/Slides/10515.pdf", "author_site": "Boseon Yoo, Jiwoo Lee, Janghoon Ju, Seijun Chung, Soyeon Kim, Jaesik Choi", "author": "Boseon Yoo; Jiwoo Lee; Janghoon Ju; Seijun Chung; Soyeon Kim; Jaesik Choi", "abstract": "We introduce a novel loss function, Covariance Loss, which is conceptually equivalent to conditional neural processes and has a form of regularization so that it is applicable to many kinds of neural networks. With the proposed loss, mappings from input variables to target variables are highly affected by dependencies of target variables as well as mean activation and mean dependencies of input and target variables. This nature enables the resulting neural networks to become more robust to noisy observations and recapture missing dependencies from prior information. In order to show the validity of the proposed loss, we conduct extensive sets of experiments on real-world datasets with state-of-the-art models and discuss the benefits and drawbacks of the proposed Covariance Loss.", "bibtex": "@InProceedings{pmlr-v139-yoo21b,\n title = \t {Conditional Temporal Neural Processes with Covariance Loss},\n author = {Yoo, Boseon and Lee, Jiwoo and Ju, Janghoon and Chung, Seijun and Kim, Soyeon and Choi, Jaesik},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12051--12061},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yoo21b/yoo21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/yoo21b.html},\n abstract = \t {We introduce a novel loss function, Covariance Loss, which is conceptually equivalent to conditional neural processes and has a form of regularization so that it is applicable to many kinds of neural networks. 
With the proposed loss, mappings from input variables to target variables are highly affected by dependencies of target variables as well as mean activation and mean dependencies of input and target variables. This nature enables the resulting neural networks to become more robust to noisy observations and recapture missing dependencies from prior information. In order to show the validity of the proposed loss, we conduct extensive sets of experiments on real-world datasets with state-of-the-art models and discuss the benefits and drawbacks of the proposed Covariance Loss.}\n}", "pdf": "http://proceedings.mlr.press/v139/yoo21b/yoo21b.pdf", "supp": "", "pdf_size": 6419290, "gs_citation": 19, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11587001317959077781&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Graduate School of AI, Korea Advanced Institute of Science and Technology, Republic of Korea; Department of Computer Science and Engineering, Ulsan National Institute of Science and Technology, Republic of Korea; Department of Computer Science and Engineering, Ulsan National Institute of Science and Technology, Republic of Korea; Graduate School of AI, Korea Advanced Institute of Science and Technology, Republic of Korea; Graduate School of AI, Korea Advanced Institute of Science and Technology, Republic of Korea; Graduate School of AI, Korea Advanced Institute of Science and Technology, Republic of Korea + Ineeji Inc., Republic of Korea", "aff_domain": "kaist.ac.kr; ; ; ; ;kaist.ac.kr", "email": "kaist.ac.kr; ; ; ; ;kaist.ac.kr", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/yoo21b.html", "aff_unique_index": "0;1;1;0;0;0+2", "aff_unique_norm": "Korea Advanced Institute of Science and Technology;Ulsan National Institute of Science and Technology;Ineeji Inc.", "aff_unique_dep": "Graduate School of AI;Department of Computer Science and Engineering;", "aff_unique_url": "https://www.kaist.ac.kr;https://www.unist.ac.kr;", "aff_unique_abbr": "KAIST;UNIST;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0+0", "aff_country_unique": "South Korea" }, { "title": "Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9245", "id": "9245", "proceeding": "http://proceedings.mlr.press/v139/kim21f.html", "slides": "", "author_site": "Jaehyeon Kim, Jungil Kong, Juhee Son", "author": "Jaehyeon Kim; Jungil Kong; Juhee Son", "abstract": "Several recent end-to-end text-to-speech (TTS) models enabling single-stage training and parallel sampling have been proposed, but their sample quality does not match that of two-stage TTS systems. In this work, we present a parallel end-to-end TTS method that generates more natural sounding audio than current two-stage models. Our method adopts variational inference augmented with normalizing flows and an adversarial training process, which improves the expressive power of generative modeling. We also propose a stochastic duration predictor to synthesize speech with diverse rhythms from input text. With the uncertainty modeling over latent variables and the stochastic duration predictor, our method expresses the natural one-to-many relationship in which a text input can be spoken in multiple ways with different pitches and rhythms. 
A subjective human evaluation (mean opinion score, or MOS) on the LJ Speech, a single speaker dataset, shows that our method outperforms the best publicly available TTS systems and achieves a MOS comparable to ground truth.", "bibtex": "@InProceedings{pmlr-v139-kim21f,\n title = \t {Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech},\n author = {Kim, Jaehyeon and Kong, Jungil and Son, Juhee},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5530--5540},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kim21f/kim21f.pdf},\n url = \t {https://proceedings.mlr.press/v139/kim21f.html},\n abstract = \t {Several recent end-to-end text-to-speech (TTS) models enabling single-stage training and parallel sampling have been proposed, but their sample quality does not match that of two-stage TTS systems. In this work, we present a parallel end-to-end TTS method that generates more natural sounding audio than current two-stage models. Our method adopts variational inference augmented with normalizing flows and an adversarial training process, which improves the expressive power of generative modeling. We also propose a stochastic duration predictor to synthesize speech with diverse rhythms from input text. With the uncertainty modeling over latent variables and the stochastic duration predictor, our method expresses the natural one-to-many relationship in which a text input can be spoken in multiple ways with different pitches and rhythms. A subjective human evaluation (mean opinion score, or MOS) on the LJ Speech, a single speaker dataset, shows that our method outperforms the best publicly available TTS systems and achieves a MOS comparable to ground truth.}\n}", "pdf": "http://proceedings.mlr.press/v139/kim21f/kim21f.pdf", "supp": "", "pdf_size": 1412255, "gs_citation": 1105, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12414540587288194560&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Kakao Enterprise, Seongnam-si, Gyeonggi-do, Republic of Korea; Kakao Enterprise, Seongnam-si, Gyeonggi-do, Republic of Korea; Kakao Enterprise, Seongnam-si, Gyeonggi-do, Republic of Korea + School of Computing, KAIST, Daejeon, Republic of Korea", "aff_domain": "kakaoenterprise.com; ; ", "email": "kakaoenterprise.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/kim21f.html", "aff_unique_index": "0;0;0+1", "aff_unique_norm": "Kakao Enterprise;KAIST", "aff_unique_dep": ";School of Computing", "aff_unique_url": "https://www.kakaoenterprise.com;https://www.kaist.ac.kr", "aff_unique_abbr": "Kakao Enterprise;KAIST", "aff_campus_unique_index": "1", "aff_campus_unique": ";Daejeon", "aff_country_unique_index": "0;0;0+0", "aff_country_unique": "South Korea" }, { "title": "Confidence Scores Make Instance-dependent Label-noise Learning Possible", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10245", "id": "10245", "proceeding": "http://proceedings.mlr.press/v139/berthon21a.html", "slides": "", "author_site": "Antonin Berthon, Bo Han, Gang Niu, Tongliang Liu, Masashi Sugiyama", "author": "Antonin Berthon; Bo Han; Gang Niu; Tongliang Liu; Masashi Sugiyama", "abstract": "In learning with noisy labels, for every instance, its label can 
randomly walk to other classes following a transition distribution which is named a noise model. Well-studied noise models are all instance-independent, namely, the transition depends only on the original label but not the instance itself, and thus they are less practical in the wild. Fortunately, methods based on instance-dependent noise have been studied, but most of them have to rely on strong assumptions on the noise models. To alleviate this issue, we introduce confidence-scored instance-dependent noise (CSIDN), where each instance-label pair is equipped with a confidence score. We find that with the help of confidence scores, the transition distribution of each instance can be approximately estimated. Similarly to the powerful forward correction for instance-independent noise, we propose a novel instance-level forward correction for CSIDN. We demonstrate the utility and effectiveness of our method through multiple experiments on datasets with synthetic label noise and real-world unknown noise.", "bibtex": "@InProceedings{pmlr-v139-berthon21a,\n title = \t {Confidence Scores Make Instance-dependent Label-noise Learning Possible},\n author = {Berthon, Antonin and Han, Bo and Niu, Gang and Liu, Tongliang and Sugiyama, Masashi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {825--836},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/berthon21a/berthon21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/berthon21a.html},\n abstract = \t {In learning with noisy labels, for every instance, its label can randomly walk to other classes following a transition distribution which is named a noise model. Well-studied noise models are all instance-independent, namely, the transition depends only on the original label but not the instance itself, and thus they are less practical in the wild. Fortunately, methods based on instance-dependent noise have been studied, but most of them have to rely on strong assumptions on the noise models. To alleviate this issue, we introduce confidence-scored instance-dependent noise (CSIDN), where each instance-label pair is equipped with a confidence score. We find that with the help of confidence scores, the transition distribution of each instance can be approximately estimated. Similarly to the powerful forward correction for instance-independent noise, we propose a novel instance-level forward correction for CSIDN. 
We demonstrate the utility and effectiveness of our method through multiple experiments on datasets with synthetic label noise and real-world unknown noise.}\n}", "pdf": "http://proceedings.mlr.press/v139/berthon21a/berthon21a.pdf", "supp": "", "pdf_size": 3065211, "gs_citation": 137, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4564876080296413002&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "RIKEN+ENS Paris-Saclay; Hong Kong Baptist University; RIKEN; University of Sydney; University of Tokyo", "aff_domain": "gmail.com;comp.hkbu.edu.hk;riken.jp;sydney.edu.au;k.u-tokyo.ac.jp", "email": "gmail.com;comp.hkbu.edu.hk;riken.jp;sydney.edu.au;k.u-tokyo.ac.jp", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/berthon21a.html", "aff_unique_index": "0+1;2;0;3;4", "aff_unique_norm": "RIKEN;\u00c9cole Normale Sup\u00e9rieure Paris-Saclay;Hong Kong Baptist University;University of Sydney;University of Tokyo", "aff_unique_dep": ";;;;", "aff_unique_url": "https://www.riken.jp;https://www.ensparis-saclay.fr;https://www.hkbu.edu.hk;https://www.sydney.edu.au;https://www.u-tokyo.ac.jp", "aff_unique_abbr": "RIKEN;ENS Paris-Saclay;HKBU;USYD;UTokyo", "aff_campus_unique_index": "1;2", "aff_campus_unique": ";Paris-Saclay;Hong Kong SAR", "aff_country_unique_index": "0+1;2;0;3;0", "aff_country_unique": "Japan;France;China;Australia" }, { "title": "Confidence-Budget Matching for Sequential Budgeted Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9103", "id": "9103", "proceeding": "http://proceedings.mlr.press/v139/efroni21a.html", "slides": "", "author_site": "Yonathan Efroni, Nadav Merlis, Aadirupa Saha, Shie Mannor", "author": "Yonathan Efroni; Nadav Merlis; Aadirupa Saha; Shie Mannor", "abstract": "A core element in decision-making under uncertainty is the feedback on the quality of the performed actions. However, in many applications, such feedback is restricted. For example, in recommendation systems, repeatedly asking the user to provide feedback on the quality of recommendations will annoy them. In this work, we formalize decision-making problems with querying budget, where there is a (possibly time-dependent) hard limit on the number of reward queries allowed. Specifically, we focus on multi-armed bandits, linear contextual bandits, and reinforcement learning problems. We start by analyzing the performance of \u2018greedy\u2019 algorithms that query a reward whenever they can. We show that in fully stochastic settings, doing so performs surprisingly well, but in the presence of any adversity, this might lead to linear regret. To overcome this issue, we propose the Confidence-Budget Matching (CBM) principle that queries rewards when the confidence intervals are wider than the inverse square root of the available budget. 
We analyze the performance of CBM based algorithms in different settings and show that it performs well in the presence of adversity in the contexts, initial states, and budgets.", "bibtex": "@InProceedings{pmlr-v139-efroni21a,\n title = \t {Confidence-Budget Matching for Sequential Budgeted Learning},\n author = {Efroni, Yonathan and Merlis, Nadav and Saha, Aadirupa and Mannor, Shie},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2937--2947},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/efroni21a/efroni21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/efroni21a.html},\n abstract = \t {A core element in decision-making under uncertainty is the feedback on the quality of the performed actions. However, in many applications, such feedback is restricted. For example, in recommendation systems, repeatedly asking the user to provide feedback on the quality of recommendations will annoy them. In this work, we formalize decision-making problems with querying budget, where there is a (possibly time-dependent) hard limit on the number of reward queries allowed. Specifically, we focus on multi-armed bandits, linear contextual bandits, and reinforcement learning problems. We start by analyzing the performance of \u2018greedy\u2019 algorithms that query a reward whenever they can. We show that in fully stochastic settings, doing so performs surprisingly well, but in the presence of any adversity, this might lead to linear regret. To overcome this issue, we propose the Confidence-Budget Matching (CBM) principle that queries rewards when the confidence intervals are wider than the inverse square root of the available budget. 
We analyze the performance of CBM based algorithms in different settings and show that it performs well in the presence of adversity in the contexts, initial states, and budgets.}\n}", "pdf": "http://proceedings.mlr.press/v139/efroni21a/efroni21a.pdf", "supp": "", "pdf_size": 328265, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6734441028209380676&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Microsoft Research, New York; Technion, Israel; Microsoft Research, New York + Nvidia Research, Israel; Technion, Israel + Nvidia Research, Israel", "aff_domain": "gmail.com;gmail.com; ; ", "email": "gmail.com;gmail.com; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/efroni21a.html", "aff_unique_index": "0;1;0+2;1+2", "aff_unique_norm": "Microsoft;Technion - Israel Institute of Technology;NVIDIA", "aff_unique_dep": "Microsoft Research;;Nvidia Research", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://www.technion.ac.il/en/;https://www.nvidia.com/research", "aff_unique_abbr": "MSR;Technion;NV", "aff_campus_unique_index": "0;0;", "aff_campus_unique": "New York;", "aff_country_unique_index": "0;1;0+1;1+1", "aff_country_unique": "United States;Israel" }, { "title": "Conformal prediction interval for dynamic time-series", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10469", "id": "10469", "proceeding": "http://proceedings.mlr.press/v139/xu21h.html", "slides": "", "author_site": "Chen Xu, Yao Xie", "author": "Chen Xu; Yao Xie", "abstract": "We develop a method to construct distribution-free prediction intervals for dynamic time-series, called \\Verb|EnbPI| that wraps around any bootstrap ensemble estimator to construct sequential prediction intervals. \\Verb|EnbPI| is closely related to the conformal prediction (CP) framework but does not require data exchangeability. Theoretically, these intervals attain finite-sample, \\textit{approximately valid} marginal coverage for broad classes of regression functions and time-series with strongly mixing stochastic errors. Computationally, \\Verb|EnbPI| avoids overfitting and requires neither data-splitting nor training multiple ensemble estimators; it efficiently aggregates bootstrap estimators that have been trained. In general, \\Verb|EnbPI| is easy to implement, scalable to producing arbitrarily many prediction intervals sequentially, and well-suited to a wide range of regression functions. We perform extensive real-data analyses to demonstrate its effectiveness.", "bibtex": "@InProceedings{pmlr-v139-xu21h,\n title = \t {Conformal prediction interval for dynamic time-series},\n author = {Xu, Chen and Xie, Yao},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11559--11569},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xu21h/xu21h.pdf},\n url = \t {https://proceedings.mlr.press/v139/xu21h.html},\n abstract = \t {We develop a method to construct distribution-free prediction intervals for dynamic time-series, called \\Verb|EnbPI| that wraps around any bootstrap ensemble estimator to construct sequential prediction intervals. \\Verb|EnbPI| is closely related to the conformal prediction (CP) framework but does not require data exchangeability. 
Theoretically, these intervals attain finite-sample, \\textit{approximately valid} marginal coverage for broad classes of regression functions and time-series with strongly mixing stochastic errors. Computationally, \\Verb|EnbPI| avoids overfitting and requires neither data-splitting nor training multiple ensemble estimators; it efficiently aggregates bootstrap estimators that have been trained. In general, \\Verb|EnbPI| is easy to implement, scalable to producing arbitrarily many prediction intervals sequentially, and well-suited to a wide range of regression functions. We perform extensive real-data analyses to demonstrate its effectiveness.}\n}", "pdf": "http://proceedings.mlr.press/v139/xu21h/xu21h.pdf", "supp": "", "pdf_size": 2930562, "gs_citation": 167, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9397887507156986767&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Industrial and Systems Engineering, Georgia Institute of Technology; Industrial and Systems Engineering, Georgia Institute of Technology", "aff_domain": "gatech.edu;isye.gatech.edu", "email": "gatech.edu;isye.gatech.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/xu21h.html", "aff_unique_index": "0;0", "aff_unique_norm": "Georgia Institute of Technology", "aff_unique_dep": "Industrial and Systems Engineering", "aff_unique_url": "https://www.gatech.edu", "aff_unique_abbr": "Georgia Tech", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Atlanta", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Conjugate Energy-Based Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10361", "id": "10361", "proceeding": "http://proceedings.mlr.press/v139/wu21a.html", "slides": "/media/icml-2021/Slides/10361.pdf", "author_site": "Hao Wu, Babak Esmaeili, Michael Wick, Jean-Baptiste Tristan, Jan-Willem van de Meent", "author": "Hao Wu; Babak Esmaeili; Michael Wick; Jean-Baptiste Tristan; Jan-Willem Van De Meent", "abstract": "In this paper, we propose conjugate energy-based models (CEBMs), a new class of energy-based models that define a joint density over data and latent variables. The joint density of a CEBM decomposes into an intractable distribution over data and a tractable posterior over latent variables. CEBMs have similar use cases as variational autoencoders, in the sense that they learn an unsupervised mapping from data to latent variables. However, these models omit a generator network, which allows them to learn more flexible notions of similarity between data points. 
Our experiments demonstrate that conjugate EBMs achieve competitive results in terms of image modelling, predictive power of latent space, and out-of-domain detection on a variety of datasets.", "bibtex": "@InProceedings{pmlr-v139-wu21a,\n title = \t {Conjugate Energy-Based Models},\n author = {Wu, Hao and Esmaeili, Babak and Wick, Michael and Tristan, Jean-Baptiste and Van De Meent, Jan-Willem},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11228--11239},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wu21a/wu21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/wu21a.html},\n abstract = \t {In this paper, we propose conjugate energy-based models (CEBMs), a new class of energy-based models that define a joint density over data and latent variables. The joint density of a CEBM decomposes into an intractable distribution over data and a tractable posterior over latent variables. CEBMs have similar use cases as variational autoencoders, in the sense that they learn an unsupervised mapping from data to latent variables. However, these models omit a generator network, which allows them to learn more flexible notions of similarity between data points. Our experiments demonstrate that conjugate EBMs achieve competitive results in terms of image modelling, predictive power of latent space, and out-of-domain detection on a variety of datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/wu21a/wu21a.pdf", "supp": "", "pdf_size": 2754724, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10147378980451444606&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Khoury College of Computer Sciences, Northeastern University, Boston, MA, USA; Khoury College of Computer Sciences, Northeastern University, Boston, MA, USA; Oracle Labs, MA, USA; Computer Science department, Boston College, MA, USA; Khoury College of Computer Sciences, Northeastern University, Boston, MA, USA", "aff_domain": "northeastern.edu;northeastern.edu; ; ; ", "email": "northeastern.edu;northeastern.edu; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/wu21a.html", "aff_unique_index": "0;0;1;2;0", "aff_unique_norm": "Northeastern University;Oracle Labs;Boston College", "aff_unique_dep": "Khoury College of Computer Sciences;;Computer Science department", "aff_unique_url": "https://www.northeastern.edu;https://labs.oracle.com;https://www.bostoncollege.edu", "aff_unique_abbr": "NU;Oracle Labs;BC", "aff_campus_unique_index": "0;0;1;2;0", "aff_campus_unique": "Boston;Massachusetts;MA", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Connecting Interpretability and Robustness in Decision Trees through Separation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10107", "id": "10107", "proceeding": "http://proceedings.mlr.press/v139/moshkovitz21a.html", "slides": "", "author_site": "Michal Moshkovitz, Yao-Yuan Yang, Kamalika Chaudhuri", "author": "Michal Moshkovitz; Yao-Yuan Yang; Kamalika Chaudhuri", "abstract": "Recent research has recognized interpretability and robustness as essential properties of trustworthy classification. 
Curiously, a connection between robustness and interpretability was empirically observed, but the theoretical reasoning behind it remained elusive. In this paper, we rigorously investigate this connection. Specifically, we focus on interpretation using decision trees and robustness to l_{\\infty}-perturbation. Previous works defined the notion of r-separation as a sufficient condition for robustness. We prove upper and lower bounds on the tree size in case the data is r-separated. We then show that a tighter bound on the size is possible when the data is linearly separated. We provide the first algorithm with provable guarantees both on robustness, interpretability, and accuracy in the context of decision trees. Experiments confirm that our algorithm yields classifiers that are both interpretable and robust and have high accuracy.", "bibtex": "@InProceedings{pmlr-v139-moshkovitz21a,\n title = \t {Connecting Interpretability and Robustness in Decision Trees through Separation},\n author = {Moshkovitz, Michal and Yang, Yao-Yuan and Chaudhuri, Kamalika},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7839--7849},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/moshkovitz21a/moshkovitz21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/moshkovitz21a.html},\n abstract = \t {Recent research has recognized interpretability and robustness as essential properties of trustworthy classification. Curiously, a connection between robustness and interpretability was empirically observed, but the theoretical reasoning behind it remained elusive. In this paper, we rigorously investigate this connection. Specifically, we focus on interpretation using decision trees and robustness to l_{\\infty}-perturbation. Previous works defined the notion of r-separation as a sufficient condition for robustness. We prove upper and lower bounds on the tree size in case the data is r-separated. We then show that a tighter bound on the size is possible when the data is linearly separated. We provide the first algorithm with provable guarantees both on robustness, interpretability, and accuracy in the context of decision trees. 
Experiments confirm that our algorithm yields classifiers that are both interpretable and robust and have high accuracy.}\n}", "pdf": "http://proceedings.mlr.press/v139/moshkovitz21a/moshkovitz21a.pdf", "supp": "", "pdf_size": 545509, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2331497214666374393&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "University of California, San Diego; University of California, San Diego; University of California, San Diego", "aff_domain": "eng.ucsd.edu;eng.ucsd.edu; ", "email": "eng.ucsd.edu;eng.ucsd.edu; ", "github": "https://github.com/yangarbiter/interpretable-robust-trees", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/moshkovitz21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, San Diego", "aff_unique_dep": "", "aff_unique_url": "https://www.ucsd.edu", "aff_unique_abbr": "UCSD", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "San Diego", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Connecting Optimal Ex-Ante Collusion in Teams to Extensive-Form Correlation: Faster Algorithms and Positive Complexity Results", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10461", "id": "10461", "proceeding": "http://proceedings.mlr.press/v139/farina21a.html", "slides": "", "author_site": "Gabriele Farina, Andrea Celli, Nicola Gatti, Tuomas Sandholm", "author": "Gabriele Farina; Andrea Celli; Nicola Gatti; Tuomas Sandholm", "abstract": "We focus on the problem of finding an optimal strategy for a team of players that faces an opponent in an imperfect-information zero-sum extensive-form game. Team members are not allowed to communicate during play but can coordinate before the game. In this setting, it is known that the best the team can do is sample a profile of potentially randomized strategies (one per player) from a joint (a.k.a. correlated) probability distribution at the beginning of the game. In this paper, we first provide new modeling results about computing such an optimal distribution by drawing a connection to a different literature on extensive-form correlation. Second, we provide an algorithm that allows one to cap the number of profiles employed in the solution. This begets an anytime algorithm by increasing the cap. We find that often a handful of well-chosen such profiles suffices to reach optimal utility for the team. This enables team members to reach coordination through a simple and understandable plan. Finally, inspired by this observation and leveraging theoretical concepts that we introduce, we develop an efficient column-generation algorithm for finding an optimal distribution for the team. We evaluate it on a suite of common benchmark games. 
It is three orders of magnitude faster than the prior state of the art on games that the latter can solve and it can also solve several games that were previously unsolvable.", "bibtex": "@InProceedings{pmlr-v139-farina21a,\n title = \t {Connecting Optimal Ex-Ante Collusion in Teams to Extensive-Form Correlation: Faster Algorithms and Positive Complexity Results},\n author = {Farina, Gabriele and Celli, Andrea and Gatti, Nicola and Sandholm, Tuomas},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3164--3173},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/farina21a/farina21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/farina21a.html},\n abstract = \t {We focus on the problem of finding an optimal strategy for a team of players that faces an opponent in an imperfect-information zero-sum extensive-form game. Team members are not allowed to communicate during play but can coordinate before the game. In this setting, it is known that the best the team can do is sample a profile of potentially randomized strategies (one per player) from a joint (a.k.a. correlated) probability distribution at the beginning of the game. In this paper, we first provide new modeling results about computing such an optimal distribution by drawing a connection to a different literature on extensive-form correlation. Second, we provide an algorithm that allows one to cap the number of profiles employed in the solution. This begets an anytime algorithm by increasing the cap. We find that often a handful of well-chosen such profiles suffices to reach optimal utility for the team. This enables team members to reach coordination through a simple and understandable plan. Finally, inspired by this observation and leveraging theoretical concepts that we introduce, we develop an efficient column-generation algorithm for finding an optimal distribution for the team. We evaluate it on a suite of common benchmark games. 
It is three orders of magnitude faster than the prior state of the art on games that the latter can solve and it can also solve several games that were previously unsolvable.}\n}", "pdf": "http://proceedings.mlr.press/v139/farina21a/farina21a.pdf", "supp": "", "pdf_size": 339319, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10074356374982983819&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Computer Science Department, Carnegie Mellon University, Pittsburgh PA 15213+Strategic Machine, Inc.+Strategy Robot, Inc.+Optimized Markets, Inc.; DEIB, Politecnico di Milano, Milano, Italy; DEIB, Politecnico di Milano, Milano, Italy; Computer Science Department, Carnegie Mellon University, Pittsburgh PA 15213+Strategic Machine, Inc.+Strategy Robot, Inc.+Optimized Markets, Inc.", "aff_domain": "cs.cmu.edu;polimi.it;polimi.it;cs.cmu.edu", "email": "cs.cmu.edu;polimi.it;polimi.it;cs.cmu.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/farina21a.html", "aff_unique_index": "0+1+2+3;4;4;0+1+2+3", "aff_unique_norm": "Carnegie Mellon University;Strategic Machine, Inc.;Strategy Robot, Inc.;Optimized Markets, Inc.;Politecnico di Milano", "aff_unique_dep": "Computer Science Department;;;;DEIB", "aff_unique_url": "https://www.cmu.edu;;;;https://www.polimi.it", "aff_unique_abbr": "CMU;;;;Politecnico di Milano", "aff_campus_unique_index": "0;2;2;0", "aff_campus_unique": "Pittsburgh;;Milano", "aff_country_unique_index": "0+0+0+0;1;1;0+0+0+0", "aff_country_unique": "United States;Italy" }, { "title": "Connecting Sphere Manifolds Hierarchically for Regularization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10545", "id": "10545", "proceeding": "http://proceedings.mlr.press/v139/scieur21a.html", "slides": "/media/icml-2021/Slides/10545.pdf", "author_site": "Damien Scieur, Youngsung Kim", "author": "Damien Scieur; Youngsung Kim", "abstract": "This paper considers classification problems with hierarchically organized classes. We force the classifier (hyperplane) of each class to belong to a sphere manifold, whose center is the classifier of its super-class. Then, individual sphere manifolds are connected based on their hierarchical relations. Our technique replaces the last layer of a neural network by combining a spherical fully-connected layer with a hierarchical layer. This regularization is shown to improve the performance of widely used deep neural network architectures (ResNet and DenseNet) on publicly available datasets (CIFAR100, CUB200, Stanford dogs, Stanford cars, and Tiny-ImageNet).", "bibtex": "@InProceedings{pmlr-v139-scieur21a,\n title = \t {Connecting Sphere Manifolds Hierarchically for Regularization},\n author = {Scieur, Damien and Kim, Youngsung},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9399--9409},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/scieur21a/scieur21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/scieur21a.html},\n abstract = \t {This paper considers classification problems with hierarchically organized classes. We force the classifier (hyperplane) of each class to belong to a sphere manifold, whose center is the classifier of its super-class. 
Then, individual sphere manifolds are connected based on their hierarchical relations. Our technique replaces the last layer of a neural network by combining a spherical fully-connected layer with a hierarchical layer. This regularization is shown to improve the performance of widely used deep neural network architectures (ResNet and DenseNet) on publicly available datasets (CIFAR100, CUB200, Stanford dogs, Stanford cars, and Tiny-ImageNet).}\n}", "pdf": "http://proceedings.mlr.press/v139/scieur21a/scieur21a.pdf", "supp": "", "pdf_size": 3171297, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10766676068622154876&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 7, "aff": "Samsung SAIT AI Lab, Montreal; Samsung Advanced Institute of Technology (SAIT)", "aff_domain": "gmail.com;gmail.com", "email": "gmail.com;gmail.com", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/scieur21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Samsung", "aff_unique_dep": "AI Lab", "aff_unique_url": "https://www.sait.samsung.com", "aff_unique_abbr": "SAIT", "aff_campus_unique_index": "0", "aff_campus_unique": "Montreal;", "aff_country_unique_index": "0;1", "aff_country_unique": "Canada;South Korea" }, { "title": "Consensus Control for Decentralized Deep Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10027", "id": "10027", "proceeding": "http://proceedings.mlr.press/v139/kong21a.html", "slides": "", "author_site": "Lingjing Kong, Tao Lin, Anastasiia Koloskova, Martin Jaggi, Sebastian Stich", "author": "Lingjing Kong; Tao Lin; Anastasia Koloskova; Martin Jaggi; Sebastian Stich", "abstract": "Decentralized training of deep learning models enables on-device learning over networks, as well as efficient scaling to large compute clusters. Experiments in earlier works reveal that, even in a data-center setup, decentralized training often suffers from the degradation in the quality of the model: the training and test performance of models trained in a decentralized fashion is in general worse than that of models trained in a centralized fashion, and this performance drop is impacted by parameters such as network size, communication topology and data partitioning. We identify the changing consensus distance between devices as a key parameter to explain the gap between centralized and decentralized training. We show in theory that when the training consensus distance is lower than a critical quantity, decentralized training converges as fast as the centralized counterpart. We empirically validate that the relation between generalization performance and consensus distance is consistent with this theoretical observation. Our empirical insights allow the principled design of better decentralized training schemes that mitigate the performance drop. 
To this end, we provide practical training guidelines and exemplify its effectiveness on the data-center setup as the important first step.", "bibtex": "@InProceedings{pmlr-v139-kong21a,\n title = \t {Consensus Control for Decentralized Deep Learning},\n author = {Kong, Lingjing and Lin, Tao and Koloskova, Anastasia and Jaggi, Martin and Stich, Sebastian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5686--5696},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kong21a/kong21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kong21a.html},\n abstract = \t {Decentralized training of deep learning models enables on-device learning over networks, as well as efficient scaling to large compute clusters. Experiments in earlier works reveal that, even in a data-center setup, decentralized training often suffers from the degradation in the quality of the model: the training and test performance of models trained in a decentralized fashion is in general worse than that of models trained in a centralized fashion, and this performance drop is impacted by parameters such as network size, communication topology and data partitioning. We identify the changing consensus distance between devices as a key parameter to explain the gap between centralized and decentralized training. We show in theory that when the training consensus distance is lower than a critical quantity, decentralized training converges as fast as the centralized counterpart. We empirically validate that the relation between generalization performance and consensus distance is consistent with this theoretical observation. Our empirical insights allow the principled design of better decentralized training schemes that mitigate the performance drop. 
To this end, we provide practical training guidelines and exemplify its effectiveness on the data-center setup as the important first step.}\n}", "pdf": "http://proceedings.mlr.press/v139/kong21a/kong21a.pdf", "supp": "", "pdf_size": 4560317, "gs_citation": 100, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1073856743161560892&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "EPFL; EPFL; EPFL; EPFL; EPFL", "aff_domain": "epfl.ch;epfl.ch;epfl.ch;epfl.ch;epfl.ch", "email": "epfl.ch;epfl.ch;epfl.ch;epfl.ch;epfl.ch", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/kong21a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "EPFL", "aff_unique_dep": "", "aff_unique_url": "https://www.epfl.ch", "aff_unique_abbr": "EPFL", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "Switzerland" }, { "title": "Conservative Objective Models for Effective Offline Model-Based Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9385", "id": "9385", "proceeding": "http://proceedings.mlr.press/v139/trabucco21a.html", "slides": "/media/icml-2021/Slides/9385.pdf", "author_site": "Brandon Trabucco, Aviral Kumar, Xinyang Geng, Sergey Levine", "author": "Brandon Trabucco; Aviral Kumar; Xinyang Geng; Sergey Levine", "abstract": "In this paper, we aim to solve data-driven model-based optimization (MBO) problems, where the goal is to find a design input that maximizes an unknown objective function provided access to only a static dataset of inputs and their corresponding objective values. Such data-driven optimization procedures are the only practical methods in many real-world domains where active data collection is expensive (e.g., when optimizing over proteins) or dangerous (e.g., when optimizing over aircraft designs, actively evaluating malformed aircraft designs is unsafe). Typical methods for MBO that optimize the input against a learned model of the unknown score function are affected by erroneous overestimation in the learned model caused due to distributional shift, that drives the optimizer to low-scoring or invalid inputs. To overcome this, we propose conservative objective models (COMs), a method that learns a model of the objective function which lower bounds the actual value of the ground-truth objective on out-of-distribution inputs and uses it for optimization. 
In practice, COMs outperform a number of existing methods on a wide range of MBO problems, including optimizing controller parameters, robot morphologies, and superconducting materials.", "bibtex": "@InProceedings{pmlr-v139-trabucco21a,\n title = \t {Conservative Objective Models for Effective Offline Model-Based Optimization},\n author = {Trabucco, Brandon and Kumar, Aviral and Geng, Xinyang and Levine, Sergey},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10358--10368},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/trabucco21a/trabucco21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/trabucco21a.html},\n abstract = \t {In this paper, we aim to solve data-driven model-based optimization (MBO) problems, where the goal is to find a design input that maximizes an unknown objective function provided access to only a static dataset of inputs and their corresponding objective values. Such data-driven optimization procedures are the only practical methods in many real-world domains where active data collection is expensive (e.g., when optimizing over proteins) or dangerous (e.g., when optimizing over aircraft designs, actively evaluating malformed aircraft designs is unsafe). Typical methods for MBO that optimize the input against a learned model of the unknown score function are affected by erroneous overestimation in the learned model caused due to distributional shift, that drives the optimizer to low-scoring or invalid inputs. To overcome this, we propose conservative objective models (COMs), a method that learns a model of the objective function which lower bounds the actual value of the ground-truth objective on out-of-distribution inputs and uses it for optimization. 
In practice, COMs outperform a number of existing methods on a wide range of MBO problems, including optimizing controller parameters, robot morphologies, and superconducting materials.}\n}", "pdf": "http://proceedings.mlr.press/v139/trabucco21a/trabucco21a.pdf", "supp": "", "pdf_size": 901633, "gs_citation": 108, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10951629581873877852&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Department of Electrical Engineering and Computer Sciences, University of California Berkeley; Department of Electrical Engineering and Computer Sciences, University of California Berkeley; Department of Electrical Engineering and Computer Sciences, University of California Berkeley; Department of Electrical Engineering and Computer Sciences, University of California Berkeley", "aff_domain": "berkeley.edu;berkeley.edu; ; ", "email": "berkeley.edu;berkeley.edu; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/trabucco21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Department of Electrical Engineering and Computer Sciences", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Consistent Nonparametric Methods for Network Assisted Covariate Estimation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9083", "id": "9083", "proceeding": "http://proceedings.mlr.press/v139/mao21a.html", "slides": "/media/icml-2021/Slides/9083.pdf", "author_site": "Xueyu Mao, Deepayan Chakrabarti, Purnamrita Sarkar", "author": "Xueyu Mao; Deepayan Chakrabarti; Purnamrita Sarkar", "abstract": "Networks with node covariates are commonplace: for example, people in a social network have interests, or product preferences, etc. If we know the covariates for some nodes, can we infer them for the remaining nodes? In this paper we propose a new similarity measure between two nodes based on the patterns of their 2-hop neighborhoods. We show that a simple algorithm (CN-VEC) like nearest neighbor regression with this metric is consistent for a wide range of models when the degree grows faster than $n^{1/3}$ up-to logarithmic factors, where $n$ is the number of nodes. For \"low-rank\" latent variable models, the natural contender will be to estimate the latent variables using SVD and use them for non-parametric regression. While we show consistency of this method under less stringent sparsity conditions, our experimental results suggest that the simple local CN-VEC method either outperforms the global SVD-RBF method, or has comparable performance for low rank models. 
We also present simulated and real data experiments to show the effectiveness of our algorithms compared to the state of the art.", "bibtex": "@InProceedings{pmlr-v139-mao21a,\n title = \t {Consistent Nonparametric Methods for Network Assisted Covariate Estimation},\n author = {Mao, Xueyu and Chakrabarti, Deepayan and Sarkar, Purnamrita},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7435--7446},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/mao21a/mao21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/mao21a.html},\n abstract = \t {Networks with node covariates are commonplace: for example, people in a social network have interests, or product preferences, etc. If we know the covariates for some nodes, can we infer them for the remaining nodes? In this paper we propose a new similarity measure between two nodes based on the patterns of their 2-hop neighborhoods. We show that a simple algorithm (CN-VEC) like nearest neighbor regression with this metric is consistent for a wide range of models when the degree grows faster than $n^{1/3}$ up-to logarithmic factors, where $n$ is the number of nodes. For \"low-rank\" latent variable models, the natural contender will be to estimate the latent variables using SVD and use them for non-parametric regression. While we show consistency of this method under less stringent sparsity conditions, our experimental results suggest that the simple local CN-VEC method either outperforms the global SVD-RBF method, or has comparable performance for low rank models. We also present simulated and real data experiments to show the effectiveness of our algorithms compared to the state of the art.}\n}", "pdf": "http://proceedings.mlr.press/v139/mao21a/mao21a.pdf", "supp": "", "pdf_size": 3983903, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16424872156988166392&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science; Department of Information, Risk, and Operations Management; Department of Statistics and Data Sciences", "aff_domain": "cs.utexas.edu;utexas.edu;austin.utexas.edu", "email": "cs.utexas.edu;utexas.edu;austin.utexas.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/mao21a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Unknown Institution;University of Florida;University of Texas at Austin", "aff_unique_dep": "Department of Computer Science;Department of Information, Risk, and Operations Management;Department of Statistics and Data Sciences", "aff_unique_url": ";https://warrington.ufl.edu/;https://www.stat.utexas.edu", "aff_unique_abbr": ";;UT Austin", "aff_campus_unique_index": "1", "aff_campus_unique": ";Austin", "aff_country_unique_index": "1;1", "aff_country_unique": ";United States" }, { "title": "Consistent regression when oblivious outliers overwhelm", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10579", "id": "10579", "proceeding": "http://proceedings.mlr.press/v139/d-orsi21a.html", "slides": "", "author_site": "Tommaso d'Orsi, Gleb Novikov, David Steurer", "author": "Tommaso D\u2019Orsi; Gleb Novikov; David Steurer", "abstract": "We consider a robust linear regression model $y=X\\beta^* + \\eta$, where an adversary oblivious to the 
design $X\\in \\mathbb{R}^{n\\times d}$ may choose $\\eta$ to corrupt all but an $\\alpha$ fraction of the observations $y$ in an arbitrary way. Prior to our work, even for Gaussian $X$, no estimator for $\\beta^*$ was known to be consistent in this model except for quadratic sample size $n \\gtrsim (d/\\alpha)^2$ or for logarithmic inlier fraction $\\alpha\\ge 1/\\log n$. We show that consistent estimation is possible with nearly linear sample size and inverse-polynomial inlier fraction. Concretely, we show that the Huber loss estimator is consistent for every sample size $n= \\omega(d/\\alpha^2)$ and achieves an error rate of $O(d/\\alpha^2n)^{1/2}$ (both bounds are optimal up to constant factors). Our results extend to designs far beyond the Gaussian case and only require the column span of $X$ to not contain approximately sparse vectors (similar to the kind of assumption commonly made about the kernel space for compressed sensing). We provide two technically similar proofs. One proof is phrased in terms of strong convexity, extending work of [Tsakonas et al. \u201914], and particularly short. The other proof highlights a connection between the Huber loss estimator and high-dimensional median computations. In the special case of Gaussian designs, this connection leads us to a strikingly simple algorithm based on computing coordinate-wise medians that achieves nearly optimal guarantees in linear time, and that can exploit sparsity of $\\beta^*$. The model studied here also captures heavy-tailed noise distributions that may not even have a first moment.", "bibtex": "@InProceedings{pmlr-v139-d-orsi21a,\n title = \t {Consistent regression when oblivious outliers overwhelm},\n author = {D'Orsi, Tommaso and Novikov, Gleb and Steurer, David},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2297--2306},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/d-orsi21a/d-orsi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/d-orsi21a.html},\n abstract = \t {We consider a robust linear regression model $y=X\\beta^* + \\eta$, where an adversary oblivious to the design $X\\in \\mathbb{R}^{n\\times d}$ may choose $\\eta$ to corrupt all but an $\\alpha$ fraction of the observations $y$ in an arbitrary way. Prior to our work, even for Gaussian $X$, no estimator for $\\beta^*$ was known to be consistent in this model except for quadratic sample size $n \\gtrsim (d/\\alpha)^2$ or for logarithmic inlier fraction $\\alpha\\ge 1/\\log n$. We show that consistent estimation is possible with nearly linear sample size and inverse-polynomial inlier fraction. Concretely, we show that the Huber loss estimator is consistent for every sample size $n= \\omega(d/\\alpha^2)$ and achieves an error rate of $O(d/\\alpha^2n)^{1/2}$ (both bounds are optimal up to constant factors). Our results extend to designs far beyond the Gaussian case and only require the column span of $X$ to not contain approximately sparse vectors (similar to the kind of assumption commonly made about the kernel space for compressed sensing). We provide two technically similar proofs. One proof is phrased in terms of strong convexity, extending work of [Tsakonas et al. \u201914], and particularly short. 
The other proof highlights a connection between the Huber loss estimator and high-dimensional median computations. In the special case of Gaussian designs, this connection leads us to a strikingly simple algorithm based on computing coordinate-wise medians that achieves nearly optimal guarantees in linear time, and that can exploit sparsity of $\\beta^*$. The model studied here also captures heavy-tailed noise distributions that may not even have a first moment.}\n}", "pdf": "http://proceedings.mlr.press/v139/d-orsi21a/d-orsi21a.pdf", "supp": "", "pdf_size": 282362, "gs_citation": 19, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2486734469223148287&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science, ETH Z\u00fcrich, Switzerland; Department of Computer Science, ETH Z\u00fcrich, Switzerland; Department of Computer Science, ETH Z\u00fcrich, Switzerland", "aff_domain": "inf.ethz.ch;inf.ethz.ch;inf.ethz.ch", "email": "inf.ethz.ch;inf.ethz.ch;inf.ethz.ch", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/d-orsi21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETHZ", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Switzerland" }, { "title": "Context-Aware Online Collective Inference for Templated Graphical Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9525", "id": "9525", "proceeding": "http://proceedings.mlr.press/v139/dickens21a.html", "slides": "", "author_site": "Charles Dickens, Connor Pryor, Eriq Augustine, Alexander Miller, Lise Getoor", "author": "Charles Dickens; Connor Pryor; Eriq Augustine; Alexander Miller; Lise Getoor", "abstract": "In this work, we examine online collective inference, the problem of maintaining and performing inference over a sequence of evolving graphical models. We utilize templated graphical models (TGM), a general class of graphical models expressed via templates and instantiated with data. A key challenge is minimizing the cost of instantiating the updated model. To address this, we define a class of exact and approximate context-aware methods for updating an existing TGM. These methods avoid a full re-instantiation by using the context of the updates to only add relevant components to the graphical model. Further, we provide stability bounds for the general online inference problem and regret bounds for a proposed approximation. Finally, we implement our approach in probabilistic soft logic, and test it on several online collective inference tasks. 
Through these experiments we verify the bounds on regret and stability, and show that our approximate online approach consistently runs two to five times faster than the offline alternative while, surprisingly, maintaining the quality of the predictions.", "bibtex": "@InProceedings{pmlr-v139-dickens21a,\n title = \t {Context-Aware Online Collective Inference for Templated Graphical Models},\n author = {Dickens, Charles and Pryor, Connor and Augustine, Eriq and Miller, Alexander and Getoor, Lise},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2707--2716},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/dickens21a/dickens21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/dickens21a.html},\n abstract = \t {In this work, we examine online collective inference, the problem of maintaining and performing inference over a sequence of evolving graphical models. We utilize templated graphical models (TGM), a general class of graphical models expressed via templates and instantiated with data. A key challenge is minimizing the cost of instantiating the updated model. To address this, we define a class of exact and approximate context-aware methods for updating an existing TGM. These methods avoid a full re-instantiation by using the context of the updates to only add relevant components to the graphical model. Further, we provide stability bounds for the general online inference problem and regret bounds for a proposed approximation. Finally, we implement our approach in probabilistic soft logic, and test it on several online collective inference tasks. 
Through these experiments we verify the bounds on regret and stability, and show that our approximate online approach consistently runs two to five times faster than the offline alternative while, surprisingly, maintaining the quality of the predictions.}\n}", "pdf": "http://proceedings.mlr.press/v139/dickens21a/dickens21a.pdf", "supp": "", "pdf_size": 439861, "gs_citation": 1, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5171604093571287584&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science and Engineering, University of California Santa Cruz, California, United States; Department of Computer Science and Engineering, University of California Santa Cruz, California, United States; Department of Computer Science and Engineering, University of California Santa Cruz, California, United States; Department of Computer Science and Engineering, University of California Santa Cruz, California, United States; Department of Computer Science and Engineering, University of California Santa Cruz, California, United States", "aff_domain": "ucsc.edu; ; ; ; ", "email": "ucsc.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/dickens21a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "University of California Santa Cruz", "aff_unique_dep": "Department of Computer Science and Engineering", "aff_unique_url": "https://www.ucsc.edu", "aff_unique_abbr": "UCSC", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Santa Cruz", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Continual Learning in the Teacher-Student Setup: Impact of Task Similarity", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9371", "id": "9371", "proceeding": "http://proceedings.mlr.press/v139/lee21e.html", "slides": "", "author_site": "Sebastian Lee, Sebastian Goldt, Andrew Saxe", "author": "Sebastian Lee; Sebastian Goldt; Andrew Saxe", "abstract": "Continual learning{\u2014}the ability to learn many tasks in sequence{\u2014}is critical for artificial learning systems. Yet standard training methods for deep networks often suffer from catastrophic forgetting, where learning new tasks erases knowledge of the earlier tasks. While catastrophic forgetting labels the problem, the theoretical reasons for interference between tasks remain unclear. Here, we attempt to narrow this gap between theory and practice by studying continual learning in the teacher-student setup. We extend previous analytical work on two-layer networks in the teacher-student setup to multiple teachers. Using each teacher to represent a different task, we investigate how the relationship between teachers affects the amount of forgetting and transfer exhibited by the student when the task switches. In line with recent work, we find that when tasks depend on similar features, intermediate task similarity leads to greatest forgetting. However, feature similarity is only one way in which tasks may be related. The teacher-student approach allows us to disentangle task similarity at the level of \\emph{readouts} (hidden-to-output weights) as well as \\emph{features} (input-to-hidden weights). We find a complex interplay between both types of similarity, initial transfer/forgetting rates, maximum transfer/forgetting, and the long-time (post-switch) amount of transfer/forgetting. 
Together, these results help illuminate the diverse factors contributing to catastrophic forgetting.", "bibtex": "@InProceedings{pmlr-v139-lee21e,\n title = \t {Continual Learning in the Teacher-Student Setup: Impact of Task Similarity},\n author = {Lee, Sebastian and Goldt, Sebastian and Saxe, Andrew},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6109--6119},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lee21e/lee21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/lee21e.html},\n abstract = \t {Continual learning{\u2014}the ability to learn many tasks in sequence{\u2014}is critical for artificial learning systems. Yet standard training methods for deep networks often suffer from catastrophic forgetting, where learning new tasks erases knowledge of the earlier tasks. While catastrophic forgetting labels the problem, the theoretical reasons for interference between tasks remain unclear. Here, we attempt to narrow this gap between theory and practice by studying continual learning in the teacher-student setup. We extend previous analytical work on two-layer networks in the teacher-student setup to multiple teachers. Using each teacher to represent a different task, we investigate how the relationship between teachers affects the amount of forgetting and transfer exhibited by the student when the task switches. In line with recent work, we find that when tasks depend on similar features, intermediate task similarity leads to greatest forgetting. However, feature similarity is only one way in which tasks may be related. The teacher-student approach allows us to disentangle task similarity at the level of \\emph{readouts} (hidden-to-output weights) as well as \\emph{features} (input-to-hidden weights). We find a complex interplay between both types of similarity, initial transfer/forgetting rates, maximum transfer/forgetting, and the long-time (post-switch) amount of transfer/forgetting. 
Together, these results help illuminate the diverse factors contributing to catastrophic forgetting.}\n}", "pdf": "http://proceedings.mlr.press/v139/lee21e/lee21e.pdf", "supp": "", "pdf_size": 630421, "gs_citation": 94, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4325632592050646056&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Imperial College, London, UK; International School of Advanced Studies (SISSA), Trieste, Italy; Department of Experimental Psychology, University of Oxford, UK + CIFAR Azrieli Global Scholars program, CIFAR, Toronto, Canada + Facebook AI Research", "aff_domain": "imperial.ac.uk; ;psy.ox.ac.uk", "email": "imperial.ac.uk; ;psy.ox.ac.uk", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/lee21e.html", "aff_unique_index": "0;1;2+3+4", "aff_unique_norm": "Imperial College London;International School of Advanced Studies;University of Oxford;CIFAR;Meta", "aff_unique_dep": ";;Department of Experimental Psychology;Azrieli Global Scholars program;Facebook AI Research", "aff_unique_url": "https://www.imperial.ac.uk;https://www.sissa.it;https://www.ox.ac.uk;https://www.cifar.ca;https://research.facebook.com", "aff_unique_abbr": "ICL;SISSA;Oxford;CIFAR;FAIR", "aff_campus_unique_index": "0;1;2+3", "aff_campus_unique": "London;Trieste;Oxford;Toronto;", "aff_country_unique_index": "0;1;0+2+3", "aff_country_unique": "United Kingdom;Italy;Canada;United States" }, { "title": "Continuous Coordination As a Realistic Scenario for Lifelong Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8975", "id": "8975", "proceeding": "http://proceedings.mlr.press/v139/nekoei21a.html", "slides": "/media/icml-2021/Slides/8975.pdf", "author_site": "Hadi Nekoei, Akilesh Badrinaaraayanan, Aaron Courville, Sarath Chandar", "author": "Hadi Nekoei; Akilesh Badrinaaraayanan; Aaron Courville; Sarath Chandar", "abstract": "Current deep reinforcement learning (RL) algorithms are still highly task-specific and lack the ability to generalize to new environments. Lifelong learning (LLL), however, aims at solving multiple tasks sequentially by efficiently transferring and using knowledge between tasks. Despite a surge of interest in lifelong RL in recent years, the lack of a realistic testbed makes robust evaluation of LLL algorithms difficult. Multi-agent RL (MARL), on the other hand, can be seen as a natural scenario for lifelong RL due to its inherent non-stationarity, since the agents\u2019 policies change over time. In this work, we introduce a multi-agent lifelong learning testbed that supports both zero-shot and few-shot settings. Our setup is based on Hanabi {\u2014} a partially-observable, fully cooperative multi-agent game that has been shown to be challenging for zero-shot coordination. Its large strategy space makes it a desirable environment for lifelong RL tasks. We evaluate several recent MARL methods, and benchmark state-of-the-art LLL algorithms in limited memory and computation regimes to shed light on their strengths and weaknesses. This continual learning paradigm also provides us with a pragmatic way of going beyond centralized training which is the most commonly used training protocol in MARL. We empirically show that the agents trained in our setup are able to coordinate well with unseen agents, without any additional assumptions made by previous works. 
The code and all pre-trained models are available at https://github.com/chandar-lab/Lifelong-Hanabi.", "bibtex": "@InProceedings{pmlr-v139-nekoei21a,\n title = \t {Continuous Coordination As a Realistic Scenario for Lifelong Learning},\n author = {Nekoei, Hadi and Badrinaaraayanan, Akilesh and Courville, Aaron and Chandar, Sarath},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8016--8024},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nekoei21a/nekoei21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/nekoei21a.html},\n abstract = \t {Current deep reinforcement learning (RL) algorithms are still highly task-specific and lack the ability to generalize to new environments. Lifelong learning (LLL), however, aims at solving multiple tasks sequentially by efficiently transferring and using knowledge between tasks. Despite a surge of interest in lifelong RL in recent years, the lack of a realistic testbed makes robust evaluation of LLL algorithms difficult. Multi-agent RL (MARL), on the other hand, can be seen as a natural scenario for lifelong RL due to its inherent non-stationarity, since the agents\u2019 policies change over time. In this work, we introduce a multi-agent lifelong learning testbed that supports both zero-shot and few-shot settings. Our setup is based on Hanabi {\u2014} a partially-observable, fully cooperative multi-agent game that has been shown to be challenging for zero-shot coordination. Its large strategy space makes it a desirable environment for lifelong RL tasks. We evaluate several recent MARL methods, and benchmark state-of-the-art LLL algorithms in limited memory and computation regimes to shed light on their strengths and weaknesses. This continual learning paradigm also provides us with a pragmatic way of going beyond centralized training which is the most commonly used training protocol in MARL. We empirically show that the agents trained in our setup are able to coordinate well with unseen agents, without any additional assumptions made by previous works. 
The code and all pre-trained models are available at https://github.com/chandar-lab/Lifelong-Hanabi.}\n}", "pdf": "http://proceedings.mlr.press/v139/nekoei21a/nekoei21a.pdf", "supp": "", "pdf_size": 5059885, "gs_citation": 47, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6060197404839031996&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Mila+Universit\u00e9 de Montr\u00e9al+Canada CIFAR AI Chair; Mila+Universit\u00e9 de Montr\u00e9al+Canada CIFAR AI Chair+\u00c9cole Polytechnique de Montr\u00e9al; Mila+Universit\u00e9 de Montr\u00e9al+Canada CIFAR AI Chair; Mila+\u00c9cole Polytechnique de Montr\u00e9al", "aff_domain": "gmail.com;gmail.com; ; ", "email": "gmail.com;gmail.com; ; ", "github": "https://github.com/chandar-lab/Lifelong-Hanabi", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/nekoei21a.html", "aff_unique_index": "0+1+2;0+1+2+3;0+1+2;0+3", "aff_unique_norm": "Mila;Universit\u00e9 de Montr\u00e9al;Canadian Institute for Advanced Research;Ecole Polytechnique de Montr\u00e9al", "aff_unique_dep": "Quebec Artificial Intelligence Institute;;AI Chair;", "aff_unique_url": "https://mila.quebec;https://www.umontreal.ca;https://www.cifar.ca;https://www.polymtl.ca", "aff_unique_abbr": "Mila;UdeM;CIFAR;Polytechnique Montr\u00e9al", "aff_campus_unique_index": ";1;;1", "aff_campus_unique": ";Montr\u00e9al", "aff_country_unique_index": "0+0+0;0+0+0+0;0+0+0;0+0", "aff_country_unique": "Canada" }, { "title": "Continuous-time Model-based Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8565", "id": "8565", "proceeding": "http://proceedings.mlr.press/v139/yildiz21a.html", "slides": "", "author_site": "Cagatay Yildiz, Markus Heinonen, Harri L\u00e4hdesm\u00e4ki", "author": "Cagatay Yildiz; Markus Heinonen; Harri L\u00e4hdesm\u00e4ki", "abstract": "Model-based reinforcement learning (MBRL) approaches rely on discrete-time state transition models whereas physical systems and the vast majority of control tasks operate in continuous-time. To avoid time-discretization approximation of the underlying process, we propose a continuous-time MBRL framework based on a novel actor-critic method. Our approach also infers the unknown state evolution differentials with Bayesian neural ordinary differential equations (ODE) to account for epistemic uncertainty. We implement and test our method on a new ODE-RL suite that explicitly solves continuous-time control systems. Our experiments illustrate that the model is robust against irregular and noisy data, and can solve classic control problems in a sample-efficient manner.", "bibtex": "@InProceedings{pmlr-v139-yildiz21a,\n title = \t {Continuous-time Model-based Reinforcement Learning},\n author = {Yildiz, Cagatay and Heinonen, Markus and L{\\\"a}hdesm{\\\"a}ki, Harri},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12009--12018},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yildiz21a/yildiz21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yildiz21a.html},\n abstract = \t {Model-based reinforcement learning (MBRL) approaches rely on discrete-time state transition models whereas physical systems and the vast majority of control tasks operate in continuous-time. 
To avoid time-discretization approximation of the underlying process, we propose a continuous-time MBRL framework based on a novel actor-critic method. Our approach also infers the unknown state evolution differentials with Bayesian neural ordinary differential equations (ODE) to account for epistemic uncertainty. We implement and test our method on a new ODE-RL suite that explicitly solves continuous-time control systems. Our experiments illustrate that the model is robust against irregular and noisy data, and can solve classic control problems in a sample-efficient manner.}\n}", "pdf": "http://proceedings.mlr.press/v139/yildiz21a/yildiz21a.pdf", "supp": "", "pdf_size": 5269370, "gs_citation": 77, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14746718008006143630&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science, Aalto University, Finland; Department of Computer Science, Aalto University, Finland; Department of Computer Science, Aalto University, Finland", "aff_domain": "aalto.fi; ; ", "email": "aalto.fi; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/yildiz21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Aalto University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.aalto.fi", "aff_unique_abbr": "Aalto", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Finland" }, { "title": "Contrastive Learning Inverts the Data Generating Process", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9643", "id": "9643", "proceeding": "http://proceedings.mlr.press/v139/zimmermann21a.html", "slides": "/media/icml-2021/Slides/9643.pdf", "author_site": "Roland S. Zimmermann, Yash Sharma, Steffen Schneider, Matthias Bethge, Wieland Brendel", "author": "Roland S. Zimmermann; Yash Sharma; Steffen Schneider; Matthias Bethge; Wieland Brendel", "abstract": "Contrastive learning has recently seen tremendous success in self-supervised learning. So far, however, it is largely unclear why the learned representations generalize so effectively to a large variety of downstream tasks. We here prove that feedforward models trained with objectives belonging to the commonly used InfoNCE family learn to implicitly invert the underlying generative model of the observed data. While the proofs make certain statistical assumptions about the generative model, we observe empirically that our findings hold even if these assumptions are severely violated. Our theory highlights a fundamental connection between contrastive learning, generative modeling, and nonlinear independent component analysis, thereby furthering our understanding of the learned representations as well as providing a theoretical foundation to derive more effective contrastive losses.", "bibtex": "@InProceedings{pmlr-v139-zimmermann21a,\n title = \t {Contrastive Learning Inverts the Data Generating Process},\n author = {Zimmermann, Roland S. 
and Sharma, Yash and Schneider, Steffen and Bethge, Matthias and Brendel, Wieland},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12979--12990},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zimmermann21a/zimmermann21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zimmermann21a.html},\n abstract = \t {Contrastive learning has recently seen tremendous success in self-supervised learning. So far, however, it is largely unclear why the learned representations generalize so effectively to a large variety of downstream tasks. We here prove that feedforward models trained with objectives belonging to the commonly used InfoNCE family learn to implicitly invert the underlying generative model of the observed data. While the proofs make certain statistical assumptions about the generative model, we observe empirically that our findings hold even if these assumptions are severely violated. Our theory highlights a fundamental connection between contrastive learning, generative modeling, and nonlinear independent component analysis, thereby furthering our understanding of the learned representations as well as providing a theoretical foundation to derive more effective contrastive losses.}\n}", "pdf": "http://proceedings.mlr.press/v139/zimmermann21a/zimmermann21a.pdf", "supp": "", "pdf_size": 2269222, "gs_citation": 269, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6297973976914221052&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "University of T\u00fcbingen, T\u00fcbingen, Germany+IMPRS for Intelligent Systems, T\u00fcbingen, Germany; University of T\u00fcbingen, T\u00fcbingen, Germany+IMPRS for Intelligent Systems, T\u00fcbingen, Germany+EPFL, Geneva, Switzerland; University of T\u00fcbingen, T\u00fcbingen, Germany+IMPRS for Intelligent Systems, T\u00fcbingen, Germany+EPFL, Geneva, Switzerland; University of T\u00fcbingen, T\u00fcbingen, Germany; University of T\u00fcbingen, T\u00fcbingen, Germany", "aff_domain": "uni-tuebingen.de; ; ; ; ", "email": "uni-tuebingen.de; ; ; ; ", "github": "brendel-group.github.io/cl-ica/", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/zimmermann21a.html", "aff_unique_index": "0+1;0+1+2;0+1+2;0;0", "aff_unique_norm": "University of T\u00fcbingen;IMPRS for Intelligent Systems;EPFL", "aff_unique_dep": ";;", "aff_unique_url": "https://www.uni-tuebingen.de/;https://www.imprs.tuebingen.mpg.de;https://www.epfl.ch", "aff_unique_abbr": "Uni T\u00fcbingen;IMPRS;EPFL", "aff_campus_unique_index": "0+0;0+0+1;0+0+1;0;0", "aff_campus_unique": "T\u00fcbingen;Geneva", "aff_country_unique_index": "0+0;0+0+1;0+0+1;0;0", "aff_country_unique": "Germany;Switzerland" }, { "title": "Controlling Graph Dynamics with Reinforcement Learning and Graph Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9909", "id": "9909", "proceeding": "http://proceedings.mlr.press/v139/meirom21a.html", "slides": "", "author_site": "Eli Meirom, Haggai Maron, Shie Mannor, Gal Chechik", "author": "Eli Meirom; Haggai Maron; Shie Mannor; Gal Chechik", "abstract": "We consider the problem of controlling a partially-observed dynamic process on a graph by a limited number of interventions. 
This problem naturally arises in contexts such as scheduling virus tests to curb an epidemic; targeted marketing in order to promote a product; and manually inspecting posts to detect fake news spreading on social networks. We formulate this setup as a sequential decision problem over a temporal graph process. In face of an exponential state space, combinatorial action space and partial observability, we design a novel tractable scheme to control dynamical processes on temporal graphs. We successfully apply our approach to two popular problems that fall into our framework: prioritizing which nodes should be tested in order to curb the spread of an epidemic, and influence maximization on a graph.", "bibtex": "@InProceedings{pmlr-v139-meirom21a,\n title = \t {Controlling Graph Dynamics with Reinforcement Learning and Graph Neural Networks},\n author = {Meirom, Eli and Maron, Haggai and Mannor, Shie and Chechik, Gal},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7565--7577},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/meirom21a/meirom21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/meirom21a.html},\n abstract = \t {We consider the problem of controlling a partially-observed dynamic process on a graph by a limited number of interventions. This problem naturally arises in contexts such as scheduling virus tests to curb an epidemic; targeted marketing in order to promote a product; and manually inspecting posts to detect fake news spreading on social networks. We formulate this setup as a sequential decision problem over a temporal graph process. In face of an exponential state space, combinatorial action space and partial observability, we design a novel tractable scheme to control dynamical processes on temporal graphs. We successfully apply our approach to two popular problems that fall into our framework: prioritizing which nodes should be tested in order to curb the spread of an epidemic, and influence maximization on a graph.}\n}", "pdf": "http://proceedings.mlr.press/v139/meirom21a/meirom21a.pdf", "supp": "", "pdf_size": 2057304, "gs_citation": 67, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15386428419702412998&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/meirom21a.html" }, { "title": "Convex Regularization in Monte-Carlo Tree Search", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10667", "id": "10667", "proceeding": "http://proceedings.mlr.press/v139/dam21a.html", "slides": "", "author_site": "Tuan Q Dam, Carlo D'Eramo, Jan Peters, Joni Pajarinen", "author": "Tuan Q Dam; Carlo D\u2019Eramo; Jan Peters; Joni Pajarinen", "abstract": "Monte-Carlo planning and Reinforcement Learning (RL) are essential to sequential decision making. The recent AlphaGo and AlphaZero algorithms have shown how to successfully combine these two paradigms to solve large-scale sequential decision problems. 
These methodologies exploit a variant of the well-known UCT algorithm to trade off the exploitation of good actions and the exploration of unvisited states, but their empirical success comes at the cost of poor sample-efficiency and high computation time. In this paper, we overcome these limitations by introducing the use of convex regularization in Monte-Carlo Tree Search (MCTS) to drive exploration efficiently and to improve policy updates. First, we introduce a unifying theory on the use of generic convex regularizers in MCTS, deriving the first regret analysis of regularized MCTS and showing that it guarantees an exponential convergence rate. Second, we exploit our theoretical framework to introduce novel regularized backup operators for MCTS, based on the relative entropy of the policy update and, more importantly, on the Tsallis entropy of the policy, for which we prove superior theoretical guarantees. We empirically verify the consequence of our theoretical results on a toy problem. Finally, we show how our framework can easily be incorporated in AlphaGo and we empirically show the superiority of convex regularization, w.r.t. representative baselines, on well-known RL problems across several Atari games.", "bibtex": "@InProceedings{pmlr-v139-dam21a,\n title = \t {Convex Regularization in Monte-Carlo Tree Search},\n author = {Dam, Tuan Q and D'Eramo, Carlo and Peters, Jan and Pajarinen, Joni},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2365--2375},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/dam21a/dam21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/dam21a.html},\n abstract = \t {Monte-Carlo planning and Reinforcement Learning (RL) are essential to sequential decision making. The recent AlphaGo and AlphaZero algorithms have shown how to successfully combine these two paradigms to solve large-scale sequential decision problems. These methodologies exploit a variant of the well-known UCT algorithm to trade off the exploitation of good actions and the exploration of unvisited states, but their empirical success comes at the cost of poor sample-efficiency and high computation time. In this paper, we overcome these limitations by introducing the use of convex regularization in Monte-Carlo Tree Search (MCTS) to drive exploration efficiently and to improve policy updates. First, we introduce a unifying theory on the use of generic convex regularizers in MCTS, deriving the first regret analysis of regularized MCTS and showing that it guarantees an exponential convergence rate. Second, we exploit our theoretical framework to introduce novel regularized backup operators for MCTS, based on the relative entropy of the policy update and, more importantly, on the Tsallis entropy of the policy, for which we prove superior theoretical guarantees. We empirically verify the consequence of our theoretical results on a toy problem. Finally, we show how our framework can easily be incorporated in AlphaGo and we empirically show the superiority of convex regularization, w.r.t. 
representative baselines, on well-known RL problems across several Atari games.}\n}", "pdf": "http://proceedings.mlr.press/v139/dam21a/dam21a.pdf", "supp": "", "pdf_size": 4711185, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=466083474712545091&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Department of Computer Science, Technische Universit\u00e4t Darmstadt, Germany; Department of Computer Science, Technische Universit\u00e4t Darmstadt, Germany; Department of Computer Science, Technische Universit\u00e4t Darmstadt, Germany; Department of Computer Science, Technische Universit\u00e4t Darmstadt, Germany + Department of Electrical Engineering and Automation, Aalto University, Finland", "aff_domain": "tu-darmstadt.de; ; ; ", "email": "tu-darmstadt.de; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/dam21a.html", "aff_unique_index": "0;0;0;0+1", "aff_unique_norm": "Technische Universit\u00e4t Darmstadt;Aalto University", "aff_unique_dep": "Department of Computer Science;Department of Electrical Engineering and Automation", "aff_unique_url": "https://www.tu-darmstadt.de;https://www.aalto.fi", "aff_unique_abbr": "TUD;Aalto", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0+1", "aff_country_unique": "Germany;Finland" }, { "title": "ConvexVST: A Convex Optimization Approach to Variance-stabilizing Transformation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10549", "id": "10549", "proceeding": "http://proceedings.mlr.press/v139/wang21p.html", "slides": "", "author_site": "Mengfan Wang, Boyu Lyu, Guoqiang Yu", "author": "Mengfan Wang; Boyu Lyu; Guoqiang Yu", "abstract": "The variance-stabilizing transformation (VST) problem is to transform heteroscedastic data to homoscedastic data so that they are more tractable for subsequent analysis. However, most of the existing approaches focus on finding an analytical solution for a certain parametric distribution, which severely limits the applications, because simple distributions cannot faithfully describe the real data while more complicated distributions cannot be analytically solved. In this paper, we converted the VST problem into a convex optimization problem, which can always be efficiently solved, identified the specific structure of the convex problem, which further improved the efficiency of the proposed algorithm, and showed that any finite discrete distributions and the discretized version of any continuous distributions from real data can be variance-stabilized in an easy and nonparametric way. We demonstrated the new approach on bioimaging data and achieved superior performance compared to peer algorithms in terms of not only the variance homoscedasticity but also the impact on subsequent analysis such as denoising. 
Source codes are available at https://github.com/yu-lab-vt/ConvexVST.", "bibtex": "@InProceedings{pmlr-v139-wang21p,\n title = \t {ConvexVST: A Convex Optimization Approach to Variance-stabilizing Transformation},\n author = {Wang, Mengfan and Lyu, Boyu and Yu, Guoqiang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10839--10848},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21p/wang21p.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21p.html},\n abstract = \t {The variance-stabilizing transformation (VST) problem is to transform heteroscedastic data to homoscedastic data so that they are more tractable for subsequent analysis. However, most of the existing approaches focus on finding an analytical solution for a certain parametric distribution, which severely limits the applications, because simple distributions cannot faithfully describe the real data while more complicated distributions cannot be analytically solved. In this paper, we converted the VST problem into a convex optimization problem, which can always be efficiently solved, identified the specific structure of the convex problem, which further improved the efficiency of the proposed algorithm, and showed that any finite discrete distributions and the discretized version of any continuous distributions from real data can be variance-stabilized in an easy and nonparametric way. We demonstrated the new approach on bioimaging data and achieved superior performance compared to peer algorithms in terms of not only the variance homoscedasticity but also the impact on subsequent analysis such as denoising. Source codes are available at https://github.com/yu-lab-vt/ConvexVST.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21p/wang21p.pdf", "supp": "", "pdf_size": 2967303, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8470318237210622723&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Bradley Department of Electrical and Computer Engineering, Virginia Tech, VA, USA; Bradley Department of Electrical and Computer Engineering, Virginia Tech, VA, USA; Bradley Department of Electrical and Computer Engineering, Virginia Tech, VA, USA", "aff_domain": "vt.edu; ; ", "email": "vt.edu; ; ", "github": "https://github.com/yu-lab-vt/ConvexVST", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/wang21p.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Virginia Tech", "aff_unique_dep": "Bradley Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.vt.edu", "aff_unique_abbr": "VT", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Blacksburg", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Cooperative Exploration for Multi-Agent Deep Reinforcement Learning", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8947", "id": "8947", "proceeding": "http://proceedings.mlr.press/v139/liu21j.html", "slides": "/media/icml-2021/Slides/8947.pdf", "author_site": "Iou-Jen Liu, Unnat Jain, Raymond Yeh, Alex Schwing", "author": "Iou-Jen Liu; Unnat Jain; Raymond A Yeh; Alexander Schwing", "abstract": "Exploration is critical for good results in deep reinforcement learning and has attracted much attention. 
However, existing multi-agent deep reinforcement learning algorithms still use mostly noise-based techniques. Very recently, exploration methods that consider cooperation among multiple agents have been developed. However, existing methods suffer from a common challenge: agents struggle to identify states that are worth exploring, and hardly coordinate exploration efforts toward those states. To address this shortcoming, in this paper, we propose cooperative multi-agent exploration (CMAE): agents share a common goal while exploring. The goal is selected from multiple projected state spaces by a normalized entropy-based technique. Then, agents are trained to reach the goal in a coordinated manner. We demonstrate that CMAE consistently outperforms baselines on various tasks, including a sparse-reward version of multiple-particle environment (MPE) and the Starcraft multi-agent challenge (SMAC).", "bibtex": "@InProceedings{pmlr-v139-liu21j,\n title = \t {Cooperative Exploration for Multi-Agent Deep Reinforcement Learning},\n author = {Liu, Iou-Jen and Jain, Unnat and Yeh, Raymond A and Schwing, Alexander},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6826--6836},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21j/liu21j.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21j.html},\n abstract = \t {Exploration is critical for good results in deep reinforcement learning and has attracted much attention. However, existing multi-agent deep reinforcement learning algorithms still use mostly noise-based techniques. Very recently, exploration methods that consider cooperation among multiple agents have been developed. However, existing methods suffer from a common challenge: agents struggle to identify states that are worth exploring, and hardly coordinate exploration efforts toward those states. To address this shortcoming, in this paper, we propose cooperative multi-agent exploration (CMAE): agents share a common goal while exploring. The goal is selected from multiple projected state spaces by a normalized entropy-based technique. Then, agents are trained to reach the goal in a coordinated manner. 
We demonstrate that CMAE consistently outperforms baselines on various tasks, including a sparse-reward version of multiple-particle environment (MPE) and the Starcraft multi-agent challenge (SMAC).}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21j/liu21j.pdf", "supp": "", "pdf_size": 858327, "gs_citation": 137, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8071512515869425924&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign", "aff_domain": "illinois.edu; ; ; ", "email": "illinois.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/liu21j.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Illinois Urbana-Champaign", "aff_unique_dep": "", "aff_unique_url": "https://illinois.edu", "aff_unique_abbr": "UIUC", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Urbana-Champaign", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Correcting Exposure Bias for Link Recommendation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10327", "id": "10327", "proceeding": "http://proceedings.mlr.press/v139/gupta21c.html", "slides": "/media/icml-2021/Slides/10327.pdf", "author_site": "Shantanu Gupta, Hao Wang, Zachary Lipton, Yuyang Wang", "author": "Shantanu Gupta; Hao Wang; Zachary Lipton; Yuyang Wang", "abstract": "Link prediction methods are frequently applied in recommender systems, e.g., to suggest citations for academic papers or friends in social networks. However, exposure bias can arise when users are systematically underexposed to certain relevant items. For example, in citation networks, authors might be more likely to encounter papers from their own field and thus cite them preferentially. This bias can propagate through naively trained link predictors, leading to both biased evaluation and high generalization error (as assessed by true relevance). Moreover, this bias can be exacerbated by feedback loops. We propose estimators that leverage known exposure probabilities to mitigate this bias and consequent feedback loops. Next, we provide a loss function for learning the exposure probabilities from data. Finally, experiments on semi-synthetic data based on real-world citation networks, show that our methods reliably identify (truly) relevant citations. Additionally, our methods lead to greater diversity in the recommended papers\u2019 fields of study. The code is available at github.com/shantanu95/exposure-bias-link-rec.", "bibtex": "@InProceedings{pmlr-v139-gupta21c,\n title = \t {Correcting Exposure Bias for Link Recommendation},\n author = {Gupta, Shantanu and Wang, Hao and Lipton, Zachary and Wang, Yuyang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3953--3963},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gupta21c/gupta21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/gupta21c.html},\n abstract = \t {Link prediction methods are frequently applied in recommender systems, e.g., to suggest citations for academic papers or friends in social networks. 
However, exposure bias can arise when users are systematically underexposed to certain relevant items. For example, in citation networks, authors might be more likely to encounter papers from their own field and thus cite them preferentially. This bias can propagate through naively trained link predictors, leading to both biased evaluation and high generalization error (as assessed by true relevance). Moreover, this bias can be exacerbated by feedback loops. We propose estimators that leverage known exposure probabilities to mitigate this bias and consequent feedback loops. Next, we provide a loss function for learning the exposure probabilities from data. Finally, experiments on semi-synthetic data based on real-world citation networks, show that our methods reliably identify (truly) relevant citations. Additionally, our methods lead to greater diversity in the recommended papers\u2019 fields of study. The code is available at github.com/shantanu95/exposure-bias-link-rec.}\n}", "pdf": "http://proceedings.mlr.press/v139/gupta21c/gupta21c.pdf", "supp": "", "pdf_size": 405089, "gs_citation": 44, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8695845050687290736&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Amazon; Machine Learning Department, Carnegie Mellon University; Department of Computer Science, Rutgers University; Amazon Web Services (AWS) AI Labs, Palo Alto, CA, USA", "aff_domain": "cs.cmu.edu; ; ; ", "email": "cs.cmu.edu; ; ; ", "github": "github.com/shantanu95/exposure-bias-link-rec", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/gupta21c.html", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "Amazon;Carnegie Mellon University;Rutgers University", "aff_unique_dep": "Amazon.com, Inc.;Machine Learning Department;Department of Computer Science", "aff_unique_url": "https://www.amazon.com;https://www.cmu.edu;https://www.rutgers.edu", "aff_unique_abbr": "Amazon;CMU;Rutgers", "aff_campus_unique_index": "1", "aff_campus_unique": ";Palo Alto", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Correlation Clustering in Constant Many Parallel Rounds", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10213", "id": "10213", "proceeding": "http://proceedings.mlr.press/v139/cohen-addad21b.html", "slides": "", "author_site": "Vincent Cohen-Addad, Silvio Lattanzi, Slobodan Mitrovi\u0107, Ashkan Norouzi-Fard, Nikos Parotsidis, Jakub Tarnawski", "author": "Vincent Cohen-Addad; Silvio Lattanzi; Slobodan Mitrovi\u0107; Ashkan Norouzi-Fard; Nikos Parotsidis; Jakub Tarnawski", "abstract": "Correlation clustering is a central topic in unsupervised learning, with many applications in ML and data mining. In correlation clustering, one receives as input a signed graph and the goal is to partition it to minimize the number of disagreements. In this work we propose a massively parallel computation (MPC) algorithm for this problem that is considerably faster than prior work. In particular, our algorithm uses machines with memory sublinear in the number of nodes in the graph and returns a constant approximation while running only for a constant number of rounds. To the best of our knowledge, our algorithm is the first that can provably approximate a clustering problem using only a constant number of MPC rounds in the sublinear memory regime. 
We complement our analysis with an experimental scalability evaluation of our techniques.", "bibtex": "@InProceedings{pmlr-v139-cohen-addad21b,\n title = \t {Correlation Clustering in Constant Many Parallel Rounds},\n author = {Cohen-Addad, Vincent and Lattanzi, Silvio and Mitrovi{\\'c}, Slobodan and Norouzi-Fard, Ashkan and Parotsidis, Nikos and Tarnawski, Jakub},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2069--2078},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cohen-addad21b/cohen-addad21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/cohen-addad21b.html},\n abstract = \t {Correlation clustering is a central topic in unsupervised learning, with many applications in ML and data mining. In correlation clustering, one receives as input a signed graph and the goal is to partition it to minimize the number of disagreements. In this work we propose a massively parallel computation (MPC) algorithm for this problem that is considerably faster than prior work. In particular, our algorithm uses machines with memory sublinear in the number of nodes in the graph and returns a constant approximation while running only for a constant number of rounds. To the best of our knowledge, our algorithm is the first that can provably approximate a clustering problem using only a constant number of MPC rounds in the sublinear memory regime. We complement our analysis with an experimental scalability evaluation of our techniques.}\n}", "pdf": "http://proceedings.mlr.press/v139/cohen-addad21b/cohen-addad21b.pdf", "supp": "", "pdf_size": 367522, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10550273772236757303&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Google Research, Z\u00fcrich, Switzerland; Google Research, Z\u00fcrich, Switzerland; CSAIL, MIT, Cambridge, MA, USA; Google Research, Z\u00fcrich, Switzerland; Google Research, Z\u00fcrich, Switzerland; Microsoft Research, Redmond, WA, USA", "aff_domain": "google.com;google.com;mit.edu;google.com;google.com;microsoft.com", "email": "google.com;google.com;mit.edu;google.com;google.com;microsoft.com", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/cohen-addad21b.html", "aff_unique_index": "0;0;1;0;0;2", "aff_unique_norm": "Google;Massachusetts Institute of Technology;Microsoft", "aff_unique_dep": "Google Research;Computer Science and Artificial Intelligence Laboratory;Microsoft Research", "aff_unique_url": "https://research.google;https://www.csail.mit.edu;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "Google Research;MIT;MSR", "aff_campus_unique_index": "0;0;1;0;0;2", "aff_campus_unique": "Z\u00fcrich;Cambridge;Redmond", "aff_country_unique_index": "0;0;1;0;0;1", "aff_country_unique": "Switzerland;United States" }, { "title": "CountSketches, Feature Hashing and the Median of Three", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9691", "id": "9691", "proceeding": "http://proceedings.mlr.press/v139/larsen21a.html", "slides": "", "author_site": "Kasper Green Larsen, Rasmus Pagh, Jakub T\u011btek", "author": "Kasper Green Larsen; Rasmus Pagh; Jakub T\u011btek", "abstract": "In this paper, we revisit the classic CountSketch method, which is a sparse, random 
projection that transforms a (high-dimensional) Euclidean vector $v$ to a vector of dimension $(2t-1) s$, where $t, s > 0$ are integer parameters. It is known that a CountSketch allows estimating coordinates of $v$ with variance bounded by $\\|v\\|_2^2/s$. For $t > 1$, the estimator takes the median of $2t-1$ independent estimates, and the probability that the estimate is off by more than $2 \\|v\\|_2/\\sqrt{s}$ is exponentially small in $t$. This suggests choosing $t$ to be logarithmic in a desired inverse failure probability. However, implementations of CountSketch often use a small, constant $t$. Previous work only predicts a constant factor improvement in this setting. Our main contribution is a new analysis of CountSketch, showing an improvement in variance to $O(\\min\\{\\|v\\|_1^2/s^2,\\|v\\|_2^2/s\\})$ when $t > 1$. That is, the variance decreases proportionally to $s^{-2}$, asymptotically for large enough $s$.", "bibtex": "@InProceedings{pmlr-v139-larsen21a,\n title = \t {CountSketches, Feature Hashing and the Median of Three},\n author = {Larsen, Kasper Green and Pagh, Rasmus and T{\\v{e}}tek, Jakub},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6011--6020},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/larsen21a/larsen21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/larsen21a.html},\n abstract = \t {In this paper, we revisit the classic CountSketch method, which is a sparse, random projection that transforms a (high-dimensional) Euclidean vector $v$ to a vector of dimension $(2t-1) s$, where $t, s > 0$ are integer parameters. It is known that a CountSketch allows estimating coordinates of $v$ with variance bounded by $\\|v\\|_2^2/s$. For $t > 1$, the estimator takes the median of $2t-1$ independent estimates, and the probability that the estimate is off by more than $2 \\|v\\|_2/\\sqrt{s}$ is exponentially small in $t$. This suggests choosing $t$ to be logarithmic in a desired inverse failure probability. However, implementations of CountSketch often use a small, constant $t$. Previous work only predicts a constant factor improvement in this setting. Our main contribution is a new analysis of CountSketch, showing an improvement in variance to $O(\\min\\{\\|v\\|_1^2/s^2,\\|v\\|_2^2/s\\})$ when $t > 1$. 
That is, the variance decreases proportionally to $s^{-2}$, asymptotically for large enough $s$.}\n}", "pdf": "http://proceedings.mlr.press/v139/larsen21a/larsen21a.pdf", "supp": "", "pdf_size": 460900, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8371640128554972111&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science, Aarhus University, Denmark; BARC, Department of Computer Science, University of Copenhagen, Denmark; BARC, Department of Computer Science, University of Copenhagen, Denmark", "aff_domain": "cs.au.dk;di.ku.dk;gmail.com", "email": "cs.au.dk;di.ku.dk;gmail.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/larsen21a.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "Aarhus University;University of Copenhagen", "aff_unique_dep": "Department of Computer Science;Department of Computer Science", "aff_unique_url": "https://au.dk;https://www.ku.dk", "aff_unique_abbr": "AU;UCPH", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Denmark" }, { "title": "Counterfactual Credit Assignment in Model-Free Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9795", "id": "9795", "proceeding": "http://proceedings.mlr.press/v139/mesnard21a.html", "slides": "", "author_site": "Thomas Mesnard, Theophane Weber, Fabio Viola, Shantanu Thakoor, Alaa Saade, Anna Harutyunyan, Will Dabney, Thomas Stepleton, Nicolas Heess, Arthur Guez, Eric Moulines, Marcus Hutter, Lars Buesing, Remi Munos", "author": "Thomas Mesnard; Theophane Weber; Fabio Viola; Shantanu Thakoor; Alaa Saade; Anna Harutyunyan; Will Dabney; Thomas S Stepleton; Nicolas Heess; Arthur Guez; Eric Moulines; Marcus Hutter; Lars Buesing; Remi Munos", "abstract": "Credit assignment in reinforcement learning is the problem of measuring an action\u2019s influence on future rewards. In particular, this requires separating skill from luck, i.e. disentangling the effect of an action on rewards from that of external factors and subsequent actions. To achieve this, we adapt the notion of counterfactuals from causality theory to a model-free RL setup. The key idea is to condition value functions on future events, by learning to extract relevant information from a trajectory. We formulate a family of policy gradient algorithms that use these future-conditional value functions as baselines or critics, and show that they are provably low variance. To avoid the potential bias from conditioning on future information, we constrain the hindsight information to not contain information about the agent\u2019s actions. 
We demonstrate the efficacy and validity of our algorithm on a number of illustrative and challenging problems.", "bibtex": "@InProceedings{pmlr-v139-mesnard21a,\n title = \t {Counterfactual Credit Assignment in Model-Free Reinforcement Learning},\n author = {Mesnard, Thomas and Weber, Theophane and Viola, Fabio and Thakoor, Shantanu and Saade, Alaa and Harutyunyan, Anna and Dabney, Will and Stepleton, Thomas S and Heess, Nicolas and Guez, Arthur and Moulines, Eric and Hutter, Marcus and Buesing, Lars and Munos, Remi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7654--7664},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/mesnard21a/mesnard21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/mesnard21a.html},\n abstract = \t {Credit assignment in reinforcement learning is the problem of measuring an action\u2019s influence on future rewards. In particular, this requires separating skill from luck, i.e. disentangling the effect of an action on rewards from that of external factors and subsequent actions. To achieve this, we adapt the notion of counterfactuals from causality theory to a model-free RL setup. The key idea is to condition value functions on future events, by learning to extract relevant information from a trajectory. We formulate a family of policy gradient algorithms that use these future-conditional value functions as baselines or critics, and show that they are provably low variance. To avoid the potential bias from conditioning on future information, we constrain the hindsight information to not contain information about the agent\u2019s actions. 
We demonstrate the efficacy and validity of our algorithm on a number of illustrative and challenging problems.}\n}", "pdf": "http://proceedings.mlr.press/v139/mesnard21a/mesnard21a.pdf", "supp": "", "pdf_size": 9719970, "gs_citation": 78, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5699933776164693919&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; INRIA XPOP, CMAP, \u00c9cole Polytechnique, Palaiseau, France; DeepMind; DeepMind; DeepMind", "aff_domain": "deepmind.com;deepmind.com; ; ; ; ; ; ; ; ; ; ; ; ", "email": "deepmind.com;deepmind.com; ; ; ; ; ; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 14, "oa": "https://proceedings.mlr.press/v139/mesnard21a.html", "aff_unique_index": "0;0;0;0;0;0;0;0;0;0;1;0;0;0", "aff_unique_norm": "DeepMind;Ecole Polytechnique", "aff_unique_dep": ";", "aff_unique_url": "https://deepmind.com;https://www.ensae.fr/", "aff_unique_abbr": "DeepMind;\u00c9cole Polytechnique", "aff_campus_unique_index": "1", "aff_campus_unique": ";Palaiseau", "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;1;0;0;0", "aff_country_unique": "United Kingdom;France" }, { "title": "Cross-Gradient Aggregation for Decentralized Learning from Non-IID Data", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9767", "id": "9767", "proceeding": "http://proceedings.mlr.press/v139/esfandiari21a.html", "slides": "", "author_site": "Yasaman Esfandiari, Sin Yong Tan, Zhanhong Jiang, Aditya Balu, Ethan Herron, Chinmay Hegde, Soumik Sarkar", "author": "Yasaman Esfandiari; Sin Yong Tan; Zhanhong Jiang; Aditya Balu; Ethan Herron; Chinmay Hegde; Soumik Sarkar", "abstract": "Decentralized learning enables a group of collaborative agents to learn models using a distributed dataset without the need for a central parameter server. Recently, decentralized learning algorithms have demonstrated state-of-the-art results on benchmark data sets, comparable with centralized algorithms. However, the key assumption to achieve competitive performance is that the data is independently and identically distributed (IID) among the agents which, in real-life applications, is often not applicable. Inspired by ideas from continual learning, we propose Cross-Gradient Aggregation (CGA), a novel decentralized learning algorithm where (i) each agent aggregates cross-gradient information, i.e., derivatives of its model with respect to its neighbors\u2019 datasets, and (ii) updates its model using a projected gradient based on quadratic programming (QP). We theoretically analyze the convergence characteristics of CGA and demonstrate its efficiency on non-IID data distributions sampled from the MNIST and CIFAR-10 datasets. Our empirical comparisons show superior learning performance of CGA over existing state-of-the-art decentralized learning algorithms, as well as maintaining the improved performance under information compression to reduce peer-to-peer communication overhead. 
The code is available here on GitHub.", "bibtex": "@InProceedings{pmlr-v139-esfandiari21a,\n title = \t {Cross-Gradient Aggregation for Decentralized Learning from Non-IID Data},\n author = {Esfandiari, Yasaman and Tan, Sin Yong and Jiang, Zhanhong and Balu, Aditya and Herron, Ethan and Hegde, Chinmay and Sarkar, Soumik},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3036--3046},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/esfandiari21a/esfandiari21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/esfandiari21a.html},\n abstract = \t {Decentralized learning enables a group of collaborative agents to learn models using a distributed dataset without the need for a central parameter server. Recently, decentralized learning algorithms have demonstrated state-of-the-art results on benchmark data sets, comparable with centralized algorithms. However, the key assumption to achieve competitive performance is that the data is independently and identically distributed (IID) among the agents which, in real-life applications, is often not applicable. Inspired by ideas from continual learning, we propose Cross-Gradient Aggregation (CGA), a novel decentralized learning algorithm where (i) each agent aggregates cross-gradient information, i.e., derivatives of its model with respect to its neighbors\u2019 datasets, and (ii) updates its model using a projected gradient based on quadratic programming (QP). We theoretically analyze the convergence characteristics of CGA and demonstrate its efficiency on non-IID data distributions sampled from the MNIST and CIFAR-10 datasets. Our empirical comparisons show superior learning performance of CGA over existing state-of-the-art decentralized learning algorithms, as well as maintaining the improved performance under information compression to reduce peer-to-peer communication overhead. 
The code is available here on GitHub.}\n}", "pdf": "http://proceedings.mlr.press/v139/esfandiari21a/esfandiari21a.pdf", "supp": "", "pdf_size": 686207, "gs_citation": 58, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13501782840884499288&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Department of Mechanical Engineering, Iowa State University; Department of Mechanical Engineering, Iowa State University; Johnson Controls; Department of Mechanical Engineering, Iowa State University; Department of Mechanical Engineering, Iowa State University; Computer Science and Engineering Department, New York University; Department of Mechanical Engineering, Iowa State University", "aff_domain": "iastate.edu;iastate.edu;johnsoncontrols.com;iastate.edu;iastate.edu;nyu.edu;iastate.edu", "email": "iastate.edu;iastate.edu;johnsoncontrols.com;iastate.edu;iastate.edu;nyu.edu;iastate.edu", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/esfandiari21a.html", "aff_unique_index": "0;0;1;0;0;2;0", "aff_unique_norm": "Iowa State University;Johnson Controls;New York University", "aff_unique_dep": "Department of Mechanical Engineering;;Computer Science and Engineering Department", "aff_unique_url": "https://www.iastate.edu;https://www.johnsoncontrols.com;https://www.nyu.edu", "aff_unique_abbr": "ISU;;NYU", "aff_campus_unique_index": "1", "aff_campus_unique": ";New York", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Cross-domain Imitation from Observations", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8955", "id": "8955", "proceeding": "http://proceedings.mlr.press/v139/raychaudhuri21a.html", "slides": "", "author_site": "Dripta S. Raychaudhuri, Sujoy Paul, Jeroen Vanbaar, Amit Roy-Chowdhury", "author": "Dripta S. Raychaudhuri; Sujoy Paul; Jeroen Vanbaar; Amit K. Roy-Chowdhury", "abstract": "Imitation learning seeks to circumvent the difficulty in designing proper reward functions for training agents by utilizing expert behavior. With environments modeled as Markov Decision Processes (MDP), most of the existing imitation algorithms are contingent on the availability of expert demonstrations in the same MDP as the one in which a new imitation policy is to be learned. In this paper, we study the problem of how to imitate tasks when discrepancies exist between the expert and agent MDP. These discrepancies across domains could include differing dynamics, viewpoint, or morphology; we present a novel framework to learn correspondences across such domains. Importantly, in contrast to prior works, we use unpaired and unaligned trajectories containing only states in the expert domain, to learn this correspondence. We utilize a cycle-consistency constraint on both the state space and a domain agnostic latent space to do this. In addition, we enforce consistency on the temporal position of states via a normalized position estimator function, to align the trajectories across the two domains. Once this correspondence is found, we can directly transfer the demonstrations on one domain to the other and use it for imitation. Experiments across a wide variety of challenging domains demonstrate the efficacy of our approach.", "bibtex": "@InProceedings{pmlr-v139-raychaudhuri21a,\n title = \t {Cross-domain Imitation from Observations},\n author = {Raychaudhuri, Dripta S. 
and Paul, Sujoy and Vanbaar, Jeroen and Roy-Chowdhury, Amit K.},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8902--8912},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/raychaudhuri21a/raychaudhuri21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/raychaudhuri21a.html},\n abstract = \t {Imitation learning seeks to circumvent the difficulty in designing proper reward functions for training agents by utilizing expert behavior. With environments modeled as Markov Decision Processes (MDP), most of the existing imitation algorithms are contingent on the availability of expert demonstrations in the same MDP as the one in which a new imitation policy is to be learned. In this paper, we study the problem of how to imitate tasks when discrepancies exist between the expert and agent MDP. These discrepancies across domains could include differing dynamics, viewpoint, or morphology; we present a novel framework to learn correspondences across such domains. Importantly, in contrast to prior works, we use unpaired and unaligned trajectories containing only states in the expert domain, to learn this correspondence. We utilize a cycle-consistency constraint on both the state space and a domain agnostic latent space to do this. In addition, we enforce consistency on the temporal position of states via a normalized position estimator function, to align the trajectories across the two domains. Once this correspondence is found, we can directly transfer the demonstrations on one domain to the other and use it for imitation. 
Experiments across a wide variety of challenging domains demonstrate the efficacy of our approach.}\n}", "pdf": "http://proceedings.mlr.press/v139/raychaudhuri21a/raychaudhuri21a.pdf", "supp": "", "pdf_size": 5774721, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12666986170916986885&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "University of California, Riverside; Google Research; Mitsubishi Electric Research Laboratories; University of California, Riverside", "aff_domain": "ece.ucr.edu; ; ; ", "email": "ece.ucr.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/raychaudhuri21a.html", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "University of California, Riverside;Google;Mitsubishi Electric Research Laboratories", "aff_unique_dep": ";Google Research;", "aff_unique_url": "https://www.ucr.edu;https://research.google;https://www.merl.com", "aff_unique_abbr": "UCR;Google Research;MERL", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Riverside;Mountain View;", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Cross-model Back-translated Distillation for Unsupervised Machine Translation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10009", "id": "10009", "proceeding": "http://proceedings.mlr.press/v139/nguyen21c.html", "slides": "/media/icml-2021/Slides/10009.pdf", "author_site": "Xuan-Phi Nguyen, Shafiq Joty, Thanh-Tung Nguyen, Kui Wu, Ai Ti Aw", "author": "Xuan-Phi Nguyen; Shafiq Joty; Thanh-Tung Nguyen; Kui Wu; Ai Ti Aw", "abstract": "Recent unsupervised machine translation (UMT) systems usually employ three main principles: initialization, language modeling and iterative back-translation, though they may apply them differently. Crucially, iterative back-translation and denoising auto-encoding for language modeling provide data diversity to train the UMT systems. However, the gains from these diversification processes has seemed to plateau. We introduce a novel component to the standard UMT framework called Cross-model Back-translated Distillation (CBD), that is aimed to induce another level of data diversification that existing principles lack. CBD is applicable to all previous UMT approaches. In our experiments, CBD achieves the state of the art in the WMT\u201914 English-French, WMT\u201916 English-German and English-Romanian bilingual unsupervised translation tasks, with 38.2, 30.1, and 36.3 BLEU respectively. It also yields 1.5\u20133.3 BLEU improvements in IWSLT English-French and English-German tasks. 
Through extensive experimental analyses, we show that CBD is effective because it embraces data diversity while other similar variants do not.", "bibtex": "@InProceedings{pmlr-v139-nguyen21c,\n title = \t {Cross-model Back-translated Distillation for Unsupervised Machine Translation},\n author = {Nguyen, Xuan-Phi and Joty, Shafiq and Nguyen, Thanh-Tung and Wu, Kui and Aw, Ai Ti},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8073--8083},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nguyen21c/nguyen21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/nguyen21c.html},\n abstract = \t {Recent unsupervised machine translation (UMT) systems usually employ three main principles: initialization, language modeling and iterative back-translation, though they may apply them differently. Crucially, iterative back-translation and denoising auto-encoding for language modeling provide data diversity to train the UMT systems. However, the gains from these diversification processes has seemed to plateau. We introduce a novel component to the standard UMT framework called Cross-model Back-translated Distillation (CBD), that is aimed to induce another level of data diversification that existing principles lack. CBD is applicable to all previous UMT approaches. In our experiments, CBD achieves the state of the art in the WMT\u201914 English-French, WMT\u201916 English-German and English-Romanian bilingual unsupervised translation tasks, with 38.2, 30.1, and 36.3 BLEU respectively. It also yields 1.5\u20133.3 BLEU improvements in IWSLT English-French and English-German tasks. 
Through extensive experimental analyses, we show that CBD is effective because it embraces data diversity while other similar variants do not.}\n}", "pdf": "http://proceedings.mlr.press/v139/nguyen21c/nguyen21c.pdf", "supp": "", "pdf_size": 277263, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12269896059746732525&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Nanyang Technological University+Institute for Infocomm Research (I2R), A*STAR; Nanyang Technological University+Salesforce Research Asia; Nanyang Technological University+Institute for Infocomm Research (I2R), A*STAR; Institute for Infocomm Research (I2R), A*STAR; Institute for Infocomm Research (I2R), A*STAR", "aff_domain": "e.ntu.edu.sg; ; ; ; ", "email": "e.ntu.edu.sg; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/nguyen21c.html", "aff_unique_index": "0+1;0+2;0+1;1;1", "aff_unique_norm": "Nanyang Technological University;Institute for Infocomm Research;Salesforce Research", "aff_unique_dep": ";;Research", "aff_unique_url": "https://www.ntu.edu.sg;https://www.i2r.a-star.edu.sg;https://research.salesforce.com", "aff_unique_abbr": "NTU;I2R;Salesforce Research Asia", "aff_campus_unique_index": ";1;", "aff_campus_unique": ";Asia", "aff_country_unique_index": "0+0;0+0;0+0;0;0", "aff_country_unique": "Singapore" }, { "title": "Crowdsourcing via Annotator Co-occurrence Imputation and Provable Symmetric Nonnegative Matrix Factorization", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10351", "id": "10351", "proceeding": "http://proceedings.mlr.press/v139/ibrahim21a.html", "slides": "/media/icml-2021/Slides/10351.pdf", "author_site": "Shahana Ibrahim, Xiao Fu", "author": "Shahana Ibrahim; Xiao Fu", "abstract": "Unsupervised learning of the Dawid-Skene (D&S) model from noisy, incomplete and crowdsourced annotations has been a long-standing challenge, and is a critical step towards reliably labeling massive data. A recent work takes a coupled nonnegative matrix factorization (CNMF) perspective, and shows appealing features: It ensures the identifiability of the D&S model and enjoys low sample complexity, as only the estimates of the co-occurrences of annotator labels are involved. However, the identifiability holds only when certain somewhat restrictive conditions are met in the context of crowdsourcing. Optimizing the CNMF criterion is also costly\u2014and convergence assurances are elusive. This work recasts the pairwise co-occurrence based D&S model learning problem as a symmetric NMF (SymNMF) problem\u2014which offers enhanced identifiability relative to CNMF. In practice, the SymNMF model is often (largely) incomplete, due to the lack of co-labeled items by some annotators. Two lightweight algorithms are proposed for co-occurrence imputation. Then, a low-complexity shifted rectified linear unit (ReLU)-empowered SymNMF algorithm is proposed to identify the D&S model. 
Various performance characterizations (e.g., missing co-occurrence recoverability, stability, and convergence) and evaluations are also presented.", "bibtex": "@InProceedings{pmlr-v139-ibrahim21a,\n title = \t {Crowdsourcing via Annotator Co-occurrence Imputation and Provable Symmetric Nonnegative Matrix Factorization},\n author = {Ibrahim, Shahana and Fu, Xiao},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4544--4554},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ibrahim21a/ibrahim21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ibrahim21a.html},\n abstract = \t {Unsupervised learning of the Dawid-Skene (D&S) model from noisy, incomplete and crowdsourced annotations has been a long-standing challenge, and is a critical step towards reliably labeling massive data. A recent work takes a coupled nonnegative matrix factorization (CNMF) perspective, and shows appealing features: It ensures the identifiability of the D&S model and enjoys low sample complexity, as only the estimates of the co-occurrences of annotator labels are involved. However, the identifiability holds only when certain somewhat restrictive conditions are met in the context of crowdsourcing. Optimizing the CNMF criterion is also costly\u2014and convergence assurances are elusive. This work recasts the pairwise co-occurrence based D&S model learning problem as a symmetric NMF (SymNMF) problem\u2014which offers enhanced identifiability relative to CNMF. In practice, the SymNMF model is often (largely) incomplete, due to the lack of co-labeled items by some annotators. Two lightweight algorithms are proposed for co-occurrence imputation. Then, a low-complexity shifted rectified linear unit (ReLU)-empowered SymNMF algorithm is proposed to identify the D&S model. 
Various performance characterizations (e.g., missing co-occurrence recoverability, stability, and convergence) and evaluations are also presented.}\n}", "pdf": "http://proceedings.mlr.press/v139/ibrahim21a/ibrahim21a.pdf", "supp": "", "pdf_size": 967158, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7770571927495532335&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "School of Electrical Engineering and Computer Science, Oregon State University, Corvallis, OR 97331, USA; School of Electrical Engineering and Computer Science, Oregon State University, Corvallis, OR 97331, USA", "aff_domain": "oregonstate.edu;oregonstate.edu", "email": "oregonstate.edu;oregonstate.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/ibrahim21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Oregon State University", "aff_unique_dep": "School of Electrical Engineering and Computer Science", "aff_unique_url": "https://osu.edu", "aff_unique_abbr": "OSU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Corvallis", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Crystallization Learning with the Delaunay Triangulation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8829", "id": "8829", "proceeding": "http://proceedings.mlr.press/v139/gu21a.html", "slides": "/media/icml-2021/Slides/8829.pdf", "author_site": "Jiaqi Gu, Guosheng Yin", "author": "Jiaqi Gu; Guosheng Yin", "abstract": "Based on the Delaunay triangulation, we propose the crystallization learning to estimate the conditional expectation function in the framework of nonparametric regression. By conducting the crystallization search for the Delaunay simplices closest to the target point in a hierarchical way, the crystallization learning estimates the conditional expectation of the response by fitting a local linear model to the data points of the constructed Delaunay simplices. Instead of conducting the Delaunay triangulation for the entire feature space which would encounter enormous computational difficulty, our approach focuses only on the neighborhood of the target point and thus greatly expedites the estimation for high-dimensional cases. Because the volumes of Delaunay simplices are adaptive to the density of feature data points, our method selects neighbor data points uniformly in all directions and thus is more robust to the local geometric structure of the data than existing nonparametric regression methods. 
We develop the asymptotic properties of the crystallization learning and conduct numerical experiments on both synthetic and real data to demonstrate the advantages of our method in estimation of the conditional expectation function and prediction of the response.", "bibtex": "@InProceedings{pmlr-v139-gu21a,\n title = \t {Crystallization Learning with the Delaunay Triangulation},\n author = {Gu, Jiaqi and Yin, Guosheng},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3854--3863},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gu21a/gu21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/gu21a.html},\n abstract = \t {Based on the Delaunay triangulation, we propose the crystallization learning to estimate the conditional expectation function in the framework of nonparametric regression. By conducting the crystallization search for the Delaunay simplices closest to the target point in a hierarchical way, the crystallization learning estimates the conditional expectation of the response by fitting a local linear model to the data points of the constructed Delaunay simplices. Instead of conducting the Delaunay triangulation for the entire feature space which would encounter enormous computational difficulty, our approach focuses only on the neighborhood of the target point and thus greatly expedites the estimation for high-dimensional cases. Because the volumes of Delaunay simplices are adaptive to the density of feature data points, our method selects neighbor data points uniformly in all directions and thus is more robust to the local geometric structure of the data than existing nonparametric regression methods. 
We develop the asymptotic properties of the crystallization learning and conduct numerical experiments on both synthetic and real data to demonstrate the advantages of our method in estimation of the conditional expectation function and prediction of the response.}\n}", "pdf": "http://proceedings.mlr.press/v139/gu21a/gu21a.pdf", "supp": "", "pdf_size": 963310, "gs_citation": 2, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9362464063365083763&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Statistics and Actuarial Science, University of Hong Kong, Hong Kong SAR; Department of Statistics and Actuarial Science, University of Hong Kong, Hong Kong SAR", "aff_domain": "hku.hk;hku.hk", "email": "hku.hk;hku.hk", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/gu21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Hong Kong", "aff_unique_dep": "Department of Statistics and Actuarial Science", "aff_unique_url": "https://www.hku.hk", "aff_unique_abbr": "HKU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Hong Kong SAR", "aff_country_unique_index": "0;0", "aff_country_unique": "China" }, { "title": "Cumulants of Hawkes Processes are Robust to Observation Noise", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10601", "id": "10601", "proceeding": "http://proceedings.mlr.press/v139/trouleau21a.html", "slides": "", "author_site": "William Trouleau, Jalal Etesami, Matthias Grossglauser, Negar Kiyavash, Patrick Thiran", "author": "William Trouleau; Jalal Etesami; Matthias Grossglauser; Negar Kiyavash; Patrick Thiran", "abstract": "Multivariate Hawkes processes (MHPs) are widely used in a variety of fields to model the occurrence of causally related discrete events in continuous time. Most state-of-the-art approaches address the problem of learning MHPs from perfect traces without noise. In practice, the process through which events are collected might introduce noise in the timestamps. In this work, we address the problem of learning the causal structure of MHPs when the observed timestamps of events are subject to random and unknown shifts, also known as random translations. We prove that the cumulants of MHPs are invariant to random translations, and therefore can be used to learn their underlying causal structure. Furthermore, we empirically characterize the effect of random translations on state-of-the-art learning methods. We show that maximum likelihood-based estimators are brittle, while cumulant-based estimators remain stable even in the presence of significant time shifts.", "bibtex": "@InProceedings{pmlr-v139-trouleau21a,\n title = \t {Cumulants of Hawkes Processes are Robust to Observation Noise},\n author = {Trouleau, William and Etesami, Jalal and Grossglauser, Matthias and Kiyavash, Negar and Thiran, Patrick},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10444--10454},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/trouleau21a/trouleau21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/trouleau21a.html},\n abstract = \t {Multivariate Hawkes processes (MHPs) are widely used in a variety of fields to model the occurrence of causally related discrete events in continuous time. 
Most state-of-the-art approaches address the problem of learning MHPs from perfect traces without noise. In practice, the process through which events are collected might introduce noise in the timestamps. In this work, we address the problem of learning the causal structure of MHPs when the observed timestamps of events are subject to random and unknown shifts, also known as random translations. We prove that the cumulants of MHPs are invariant to random translations, and therefore can be used to learn their underlying causal structure. Furthermore, we empirically characterize the effect of random translations on state-of-the-art learning methods. We show that maximum likelihood-based estimators are brittle, while cumulant-based estimators remain stable even in the presence of significant time shifts.}\n}", "pdf": "http://proceedings.mlr.press/v139/trouleau21a/trouleau21a.pdf", "supp": "", "pdf_size": 1907695, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8510517486518430663&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "School of Computer and Communication Sciences, EPFL, Lausanne, Switzerland+College of Management of Technology, EPFL, Lausanne, Switzerland; School of Computer and Communication Sciences, EPFL, Lausanne, Switzerland+College of Management of Technology, EPFL, Lausanne, Switzerland; School of Computer and Communication Sciences, EPFL, Lausanne, Switzerland; College of Management of Technology, EPFL, Lausanne, Switzerland; School of Computer and Communication Sciences, EPFL, Lausanne, Switzerland", "aff_domain": "epfl.ch; ; ; ; ", "email": "epfl.ch; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/trouleau21a.html", "aff_unique_index": "0+0;0+0;0;0;0", "aff_unique_norm": "EPFL", "aff_unique_dep": "School of Computer and Communication Sciences", "aff_unique_url": "https://www.epfl.ch", "aff_unique_abbr": "EPFL", "aff_campus_unique_index": "0+0;0+0;0;0;0", "aff_campus_unique": "Lausanne", "aff_country_unique_index": "0+0;0+0;0;0;0", "aff_country_unique": "Switzerland" }, { "title": "Cyclically Equivariant Neural Decoders for Cyclic Codes", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10677", "id": "10677", "proceeding": "http://proceedings.mlr.press/v139/chen21w.html", "slides": "/media/icml-2021/Slides/10677.pdf", "author_site": "Xiangyu Chen, Min Ye", "author": "Xiangyu Chen; Min Ye", "abstract": "Neural decoders were introduced as a generalization of the classic Belief Propagation (BP) decoding algorithms, where the Trellis graph in the BP algorithm is viewed as a neural network, and the weights in the Trellis graph are optimized by training the neural network. In this work, we propose a novel neural decoder for cyclic codes by exploiting their cyclically invariant property. More precisely, we impose a shift invariant structure on the weights of our neural decoder so that any cyclic shift of inputs results in the same cyclic shift of outputs. Extensive simulations with BCH codes and punctured Reed-Muller (RM) codes show that our new decoder consistently outperforms previous neural decoders when decoding cyclic codes. Finally, we propose a list decoding procedure that can significantly reduce the decoding error probability for BCH codes and punctured RM codes. For certain high-rate codes, the gap between our list decoder and the Maximum Likelihood decoder is less than $0.1$dB. 
Code available at github.com/cyclicallyneuraldecoder", "bibtex": "@InProceedings{pmlr-v139-chen21w,\n title = \t {Cyclically Equivariant Neural Decoders for Cyclic Codes},\n author = {Chen, Xiangyu and Ye, Min},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1771--1780},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21w/chen21w.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21w.html},\n abstract = \t {Neural decoders were introduced as a generalization of the classic Belief Propagation (BP) decoding algorithms, where the Trellis graph in the BP algorithm is viewed as a neural network, and the weights in the Trellis graph are optimized by training the neural network. In this work, we propose a novel neural decoder for cyclic codes by exploiting their cyclically invariant property. More precisely, we impose a shift invariant structure on the weights of our neural decoder so that any cyclic shift of inputs results in the same cyclic shift of outputs. Extensive simulations with BCH codes and punctured Reed-Muller (RM) codes show that our new decoder consistently outperforms previous neural decoders when decoding cyclic codes. Finally, we propose a list decoding procedure that can significantly reduce the decoding error probability for BCH codes and punctured RM codes. For certain high-rate codes, the gap between our list decoder and the Maximum Likelihood decoder is less than $0.1$dB. Code available at github.com/cyclicallyneuraldecoder}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21w/chen21w.pdf", "supp": "", "pdf_size": 9425954, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14253987085025630344&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Data Science and Information Technology Research Center, Tsinghua-Berkeley Shenzhen Institute, Tsinghua Shenzhen International Graduate School, Shenzhen, China; Data Science and Information Technology Research Center, Tsinghua-Berkeley Shenzhen Institute, Tsinghua Shenzhen International Graduate School, Shenzhen, China", "aff_domain": "gmail.com;gmail.com", "email": "gmail.com;gmail.com", "github": "github.com/cyclicallyneuraldecoder", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/chen21w.html", "aff_unique_index": "0;0", "aff_unique_norm": "Tsinghua University", "aff_unique_dep": "Data Science and Information Technology Research Center", "aff_unique_url": "http://www.tsinghua.edu.cn", "aff_unique_abbr": "Tsinghua", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Shenzhen", "aff_country_unique_index": "0;0", "aff_country_unique": "China" }, { "title": "DAGs with No Curl: An Efficient DAG Structure Learning Approach", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8423", "id": "8423", "proceeding": "http://proceedings.mlr.press/v139/yu21a.html", "slides": "/media/icml-2021/Slides/8423.pdf", "author_site": "Yue Yu, Tian Gao, Naiyu Yin, Qiang Ji", "author": "Yue Yu; Tian Gao; Naiyu Yin; Qiang Ji", "abstract": "Recently directed acyclic graph (DAG) structure learning is formulated as a constrained continuous optimization problem with continuous acyclicity constraints and was solved iteratively through subproblem optimization. 
To further improve efficiency, we propose a novel learning framework to model and learn the weighted adjacency matrices in the DAG space directly. Specifically, we first show that the set of weighted adjacency matrices of DAGs are equivalent to the set of weighted gradients of graph potential functions, and one may perform structure learning by searching in this equivalent set of DAGs. To instantiate this idea, we propose a new algorithm, DAG-NoCurl, which solves the optimization problem efficiently with a two-step procedure: $1)$ first we find an initial non-acyclic solution to the optimization problem, and $2)$ then we employ the Hodge decomposition of graphs and learn an acyclic graph by projecting the non-acyclic graph to the gradient of a potential function. Experimental studies on benchmark datasets demonstrate that our method provides comparable accuracy but better efficiency than baseline DAG structure learning methods on both linear and generalized structural equation models, often by more than one order of magnitude.", "bibtex": "@InProceedings{pmlr-v139-yu21a,\n title = \t {DAGs with No Curl: An Efficient DAG Structure Learning Approach},\n author = {Yu, Yue and Gao, Tian and Yin, Naiyu and Ji, Qiang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12156--12166},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yu21a/yu21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yu21a.html},\n abstract = \t {Recently directed acyclic graph (DAG) structure learning is formulated as a constrained continuous optimization problem with continuous acyclicity constraints and was solved iteratively through subproblem optimization. To further improve efficiency, we propose a novel learning framework to model and learn the weighted adjacency matrices in the DAG space directly. Specifically, we first show that the set of weighted adjacency matrices of DAGs are equivalent to the set of weighted gradients of graph potential functions, and one may perform structure learning by searching in this equivalent set of DAGs. To instantiate this idea, we propose a new algorithm, DAG-NoCurl, which solves the optimization problem efficiently with a two-step procedure: $1)$ first we find an initial non-acyclic solution to the optimization problem, and $2)$ then we employ the Hodge decomposition of graphs and learn an acyclic graph by projecting the non-acyclic graph to the gradient of a potential function. 
Experimental studies on benchmark datasets demonstrate that our method provides comparable accuracy but better efficiency than baseline DAG structure learning methods on both linear and generalized structural equation models, often by more than one order of magnitude.}\n}", "pdf": "http://proceedings.mlr.press/v139/yu21a/yu21a.pdf", "supp": "", "pdf_size": 362176, "gs_citation": 83, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3161455728562313506&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Department of Mathematics, Lehigh University, Bethlehem, PA; IBM Research, Yorktown Heights, NY; Department of Electrical, Computer, and Systems Engineering, Rensselaer Polytechnic Institute, Troy, NY; Department of Electrical, Computer, and Systems Engineering, Rensselaer Polytechnic Institute, Troy, NY", "aff_domain": "lehigh.edu; ; ;", "email": "lehigh.edu; ; ;", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/yu21a.html", "aff_unique_index": "0;1;2;2", "aff_unique_norm": "Lehigh University;IBM;Rensselaer Polytechnic Institute", "aff_unique_dep": "Department of Mathematics;IBM Research;Department of Electrical, Computer, and Systems Engineering", "aff_unique_url": "https://www.lehigh.edu;https://www.ibm.com/research;https://www.rpi.edu", "aff_unique_abbr": "Lehigh;IBM;RPI", "aff_campus_unique_index": "0;1;2;2", "aff_campus_unique": "Bethlehem;Yorktown Heights;Troy", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "DANCE: Enhancing saliency maps using decoys", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9701", "id": "9701", "proceeding": "http://proceedings.mlr.press/v139/lu21b.html", "slides": "", "author_site": "Yang Lu, Wenbo Guo, Xinyu Xing, William Stafford Noble", "author": "Yang Young Lu; Wenbo Guo; Xinyu Xing; William Stafford Noble", "abstract": "Saliency methods can make deep neural network predictions more interpretable by identifying a set of critical features in an input sample, such as pixels that contribute most strongly to a prediction made by an image classifier. Unfortunately, recent evidence suggests that many saliency methods poorly perform, especially in situations where gradients are saturated, inputs contain adversarial perturbations, or predictions rely upon inter-feature dependence. To address these issues, we propose a framework, DANCE, which improves the robustness of saliency methods by following a two-step procedure. First, we introduce a perturbation mechanism that subtly varies the input sample without changing its intermediate representations. Using this approach, we can gather a corpus of perturbed (\"decoy\") data samples while ensuring that the perturbed and original input samples follow similar distributions. Second, we compute saliency maps for the decoy samples and propose a new method to aggregate saliency maps. With this design, we offset influence of gradient saturation. From a theoretical perspective, we show that the aggregated saliency map not only captures inter-feature dependence but, more importantly, is robust against previously described adversarial perturbation methods. 
Our empirical results suggest that, both qualitatively and quantitatively, DANCE outperforms existing methods in a variety of application domains.", "bibtex": "@InProceedings{pmlr-v139-lu21b,\n title = \t {DANCE: Enhancing saliency maps using decoys},\n author = {Lu, Yang Young and Guo, Wenbo and Xing, Xinyu and Noble, William Stafford},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7124--7133},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lu21b/lu21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/lu21b.html},\n abstract = \t {Saliency methods can make deep neural network predictions more interpretable by identifying a set of critical features in an input sample, such as pixels that contribute most strongly to a prediction made by an image classifier. Unfortunately, recent evidence suggests that many saliency methods poorly perform, especially in situations where gradients are saturated, inputs contain adversarial perturbations, or predictions rely upon inter-feature dependence. To address these issues, we propose a framework, DANCE, which improves the robustness of saliency methods by following a two-step procedure. First, we introduce a perturbation mechanism that subtly varies the input sample without changing its intermediate representations. Using this approach, we can gather a corpus of perturbed (\"decoy\") data samples while ensuring that the perturbed and original input samples follow similar distributions. Second, we compute saliency maps for the decoy samples and propose a new method to aggregate saliency maps. With this design, we offset influence of gradient saturation. From a theoretical perspective, we show that the aggregated saliency map not only captures inter-feature dependence but, more importantly, is robust against previously described adversarial perturbation methods. Our empirical results suggest that, both qualitatively and quantitatively, DANCE outperforms existing methods in a variety of application domains.}\n}", "pdf": "http://proceedings.mlr.press/v139/lu21b/lu21b.pdf", "supp": "", "pdf_size": 6736368, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13345927363678932014&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Genome Sciences, University of Washington, Seattle, WA, USA+Paul G. Allen School of Computer Science and Engineering, University of Washington, Seattle, WA, USA; College of Information Sciences and Technology, The Pennsylvania State University, State College, PA, USA; College of Information Sciences and Technology, The Pennsylvania State University, State College, PA, USA; Paul G. 
Allen School of Computer Science and Engineering, University of Washington, Seattle, WA, USA", "aff_domain": "uw.edu;ist.psu.edu;ist.psu.edu;uw.edu", "email": "uw.edu;ist.psu.edu;ist.psu.edu;uw.edu", "github": "", "project": "https://bitbucket.org/noblelab/dance", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/lu21b.html", "aff_unique_index": "0+0;1;1;0", "aff_unique_norm": "University of Washington;Pennsylvania State University", "aff_unique_dep": "Department of Genome Sciences;College of Information Sciences and Technology", "aff_unique_url": "https://www.washington.edu;https://www.psu.edu", "aff_unique_abbr": "UW;PSU", "aff_campus_unique_index": "0+0;1;1;0", "aff_campus_unique": "Seattle;State College", "aff_country_unique_index": "0+0;0;0;0", "aff_country_unique": "United States" }, { "title": "DFAC Framework: Factorizing the Value Function via Quantile Mixture for Multi-Agent Distributional Q-Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9111", "id": "9111", "proceeding": "http://proceedings.mlr.press/v139/sun21c.html", "slides": "/media/icml-2021/Slides/9111_JMvcWHA.pdf", "author_site": "Wei-Fang Sun, Cheng-Kuang Lee, Chun-Yi Lee", "author": "Wei-Fang Sun; Cheng-Kuang Lee; Chun-Yi Lee", "abstract": "In fully cooperative multi-agent reinforcement learning (MARL) settings, the environments are highly stochastic due to the partial observability of each agent and the continuously changing policies of the other agents. To address the above issues, we integrate distributional RL and value function factorization methods by proposing a Distributional Value Function Factorization (DFAC) framework to generalize expected value function factorization methods to their distributional variants. DFAC extends the individual utility functions from deterministic variables to random variables, and models the quantile function of the total return as a quantile mixture. To validate DFAC, we demonstrate DFAC\u2019s ability to factorize a simple two-step matrix game with stochastic rewards and perform experiments on all Super Hard tasks of StarCraft Multi-Agent Challenge, showing that DFAC is able to outperform expected value function factorization baselines.", "bibtex": "@InProceedings{pmlr-v139-sun21c,\n title = \t {DFAC Framework: Factorizing the Value Function via Quantile Mixture for Multi-Agent Distributional Q-Learning},\n author = {Sun, Wei-Fang and Lee, Cheng-Kuang and Lee, Chun-Yi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9945--9954},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sun21c/sun21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/sun21c.html},\n abstract = \t {In fully cooperative multi-agent reinforcement learning (MARL) settings, the environments are highly stochastic due to the partial observability of each agent and the continuously changing policies of the other agents. To address the above issues, we integrate distributional RL and value function factorization methods by proposing a Distributional Value Function Factorization (DFAC) framework to generalize expected value function factorization methods to their distributional variants. 
DFAC extends the individual utility functions from deterministic variables to random variables, and models the quantile function of the total return as a quantile mixture. To validate DFAC, we demonstrate DFAC\u2019s ability to factorize a simple two-step matrix game with stochastic rewards and perform experiments on all Super Hard tasks of StarCraft Multi-Agent Challenge, showing that DFAC is able to outperform expected value function factorization baselines.}\n}", "pdf": "http://proceedings.mlr.press/v139/sun21c/sun21c.pdf", "supp": "", "pdf_size": 1329335, "gs_citation": 61, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13269837837943676067&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science, National Tsing Hua University, Taiwan+Wei-Fang Sun contributed to the work during his NVIDIA internship; NVIDIA AI Technology Center, NVIDIA Corporation; Department of Computer Science, National Tsing Hua University, Taiwan", "aff_domain": "cs.nthu.edu.tw; ;cs.nthu.edu.tw", "email": "cs.nthu.edu.tw; ;cs.nthu.edu.tw", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/sun21c.html", "aff_unique_index": "0+1;1;0", "aff_unique_norm": "National Tsing Hua University;NVIDIA", "aff_unique_dep": "Department of Computer Science;NVIDIA", "aff_unique_url": "https://www.nthu.edu.tw;https://www.nvidia.com", "aff_unique_abbr": "NTHU;NVIDIA", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Taiwan;", "aff_country_unique_index": "0+1;1;0", "aff_country_unique": "China;United States" }, { "title": "DG-LMC: A Turn-key and Scalable Synchronous Distributed MCMC Algorithm via Langevin Monte Carlo within Gibbs", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9275", "id": "9275", "proceeding": "http://proceedings.mlr.press/v139/plassier21a.html", "slides": "", "author_site": "Vincent Plassier, Maxime Vono, Alain Durmus, Eric Moulines", "author": "Vincent Plassier; Maxime Vono; Alain Durmus; Eric Moulines", "abstract": "Performing reliable Bayesian inference on a big data scale is becoming a keystone in the modern era of machine learning. A workhorse class of methods to achieve this task are Markov chain Monte Carlo (MCMC) algorithms and their design to handle distributed datasets has been the subject of many works. However, existing methods are not completely either reliable or computationally efficient. In this paper, we propose to fill this gap in the case where the dataset is partitioned and stored on computing nodes within a cluster under a master/slaves architecture. We derive a user-friendly centralised distributed MCMC algorithm with provable scaling in high-dimensional settings. 
We illustrate the relevance of the proposed methodology on both synthetic and real data experiments.", "bibtex": "@InProceedings{pmlr-v139-plassier21a,\n title = \t {DG-LMC: A Turn-key and Scalable Synchronous Distributed MCMC Algorithm via Langevin Monte Carlo within Gibbs},\n author = {Plassier, Vincent and Vono, Maxime and Durmus, Alain and Moulines, Eric},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8577--8587},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/plassier21a/plassier21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/plassier21a.html},\n abstract = \t {Performing reliable Bayesian inference on a big data scale is becoming a keystone in the modern era of machine learning. A workhorse class of methods to achieve this task are Markov chain Monte Carlo (MCMC) algorithms and their design to handle distributed datasets has been the subject of many works. However, existing methods are not completely either reliable or computationally efficient. In this paper, we propose to fill this gap in the case where the dataset is partitioned and stored on computing nodes within a cluster under a master/slaves architecture. We derive a user-friendly centralised distributed MCMC algorithm with provable scaling in high-dimensional settings. We illustrate the relevance of the proposed methodology on both synthetic and real data experiments.}\n}", "pdf": "http://proceedings.mlr.press/v139/plassier21a/plassier21a.pdf", "supp": "", "pdf_size": 993054, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1198660021452291587&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "CMAP, Ecole Polytechnique, Institut Polytechnique de Paris, Palaiseau, France+Lagrange Mathematics and Computing Research Center, Paris, 75007, France; Lagrange Mathematics and Computing Research Center, Paris, 75007, France; Universit\u00e9 Paris-Saclay, Ecole Normale Sup\u00e9rieure Paris-Saclay, Cachan, France; CMAP, Ecole Polytechnique, Institut Polytechnique de Paris, Palaiseau, France", "aff_domain": "huawei.com; ; ; ", "email": "huawei.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/plassier21a.html", "aff_unique_index": "0+1;1;2;0", "aff_unique_norm": "Ecole Polytechnique;Lagrange Mathematics and Computing Research Center;Universit\u00e9 Paris-Saclay", "aff_unique_dep": "CMAP;;Ecole Normale Sup\u00e9rieure Paris-Saclay", "aff_unique_url": "https://www.ec-polytechnique.fr;;https://www.universite-paris-saclay.fr", "aff_unique_abbr": "Polytechnique;;", "aff_campus_unique_index": "0+1;1;2;0", "aff_campus_unique": "Palaiseau;Paris;Cachan", "aff_country_unique_index": "0+0;0;0;0", "aff_country_unique": "France" }, { "title": "DORO: Distributional and Outlier Robust Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8919", "id": "8919", "proceeding": "http://proceedings.mlr.press/v139/zhai21a.html", "slides": "", "author_site": "Runtian Zhai, Chen Dan, Zico Kolter, Pradeep Ravikumar", "author": "Runtian Zhai; Chen Dan; Zico Kolter; Pradeep Ravikumar", "abstract": "Many machine learning tasks involve subpopulation shift where the testing data distribution is a subpopulation of the training distribution. 
For such settings, a line of recent work has proposed the use of a variant of empirical risk minimization(ERM) known as distributionally robust optimization (DRO). In this work, we apply DRO to real, large-scale tasks with subpopulation shift, and observe that DRO performs relatively poorly, and moreover has severe instability. We identify one direct cause of this phenomenon: sensitivity of DRO to outliers in the datasets. To resolve this issue, we propose the framework of DORO, for Distributional and Outlier Robust Optimization. At the core of this approach is a refined risk function which prevents DRO from overfitting to potential outliers. We instantiate DORO for the Cressie-Read family of R\u00e9nyi divergence, and delve into two specific instances of this family: CVaR and $\\chi^2$-DRO. We theoretically prove the effectiveness of the proposed method, and empirically show that DORO improves the performance and stability of DRO with experiments on large modern datasets, thereby positively addressing the open question raised by Hashimoto et al., 2018. Codes are available at https://github.com/RuntianZ/doro.", "bibtex": "@InProceedings{pmlr-v139-zhai21a,\n title = \t {DORO: Distributional and Outlier Robust Optimization},\n author = {Zhai, Runtian and Dan, Chen and Kolter, Zico and Ravikumar, Pradeep},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12345--12355},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhai21a/zhai21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhai21a.html},\n abstract = \t {Many machine learning tasks involve subpopulation shift where the testing data distribution is a subpopulation of the training distribution. For such settings, a line of recent work has proposed the use of a variant of empirical risk minimization(ERM) known as distributionally robust optimization (DRO). In this work, we apply DRO to real, large-scale tasks with subpopulation shift, and observe that DRO performs relatively poorly, and moreover has severe instability. We identify one direct cause of this phenomenon: sensitivity of DRO to outliers in the datasets. To resolve this issue, we propose the framework of DORO, for Distributional and Outlier Robust Optimization. At the core of this approach is a refined risk function which prevents DRO from overfitting to potential outliers. We instantiate DORO for the Cressie-Read family of R\u00e9nyi divergence, and delve into two specific instances of this family: CVaR and $\\chi^2$-DRO. We theoretically prove the effectiveness of the proposed method, and empirically show that DORO improves the performance and stability of DRO with experiments on large modern datasets, thereby positively addressing the open question raised by Hashimoto et al., 2018. 
Codes are available at https://github.com/RuntianZ/doro.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhai21a/zhai21a.pdf", "supp": "", "pdf_size": 723846, "gs_citation": 76, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7792478456437572549&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "School of Computer Science, Carnegie Mellon University, Pittsburgh, Pennsylvania, USA; School of Computer Science, Carnegie Mellon University, Pittsburgh, Pennsylvania, USA; School of Computer Science, Carnegie Mellon University, Pittsburgh, Pennsylvania, USA; School of Computer Science, Carnegie Mellon University, Pittsburgh, Pennsylvania, USA", "aff_domain": "cmu.edu; ; ; ", "email": "cmu.edu; ; ; ", "github": "https://github.com/RuntianZ/doro", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/zhai21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "School of Computer Science", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Dash: Semi-Supervised Learning with Dynamic Thresholding", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10121", "id": "10121", "proceeding": "http://proceedings.mlr.press/v139/xu21e.html", "slides": "", "author_site": "Yi Xu, Lei Shang, Jinxing Ye, Qi Qian, Yu-Feng Li, Baigui Sun, Hao Li, rong jin", "author": "Yi Xu; Lei Shang; Jinxing Ye; Qi Qian; Yu-Feng Li; Baigui Sun; Hao Li; Rong Jin", "abstract": "While semi-supervised learning (SSL) has received tremendous attentions in many machine learning tasks due to its successful use of unlabeled data, existing SSL algorithms use either all unlabeled examples or the unlabeled examples with a fixed high-confidence prediction during the training progress. However, it is possible that too many correct/wrong pseudo labeled examples are eliminated/selected. In this work we develop a simple yet powerful framework, whose key idea is to select a subset of training examples from the unlabeled data when performing existing SSL methods so that only the unlabeled examples with pseudo labels related to the labeled data will be used to train models. The selection is performed at each updating iteration by only keeping the examples whose losses are smaller than a given threshold that is dynamically adjusted through the iteration. Our proposed approach, Dash, enjoys its adaptivity in terms of unlabeled data selection and its theoretical guarantee. Specifically, we theoretically establish the convergence rate of Dash from the view of non-convex optimization. 
Finally, we empirically demonstrate the effectiveness of the proposed method in comparison with state-of-the-art over benchmarks.", "bibtex": "@InProceedings{pmlr-v139-xu21e,\n title = \t {Dash: Semi-Supervised Learning with Dynamic Thresholding},\n author = {Xu, Yi and Shang, Lei and Ye, Jinxing and Qian, Qi and Li, Yu-Feng and Sun, Baigui and Li, Hao and Jin, Rong},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11525--11536},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xu21e/xu21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/xu21e.html},\n abstract = \t {While semi-supervised learning (SSL) has received tremendous attentions in many machine learning tasks due to its successful use of unlabeled data, existing SSL algorithms use either all unlabeled examples or the unlabeled examples with a fixed high-confidence prediction during the training progress. However, it is possible that too many correct/wrong pseudo labeled examples are eliminated/selected. In this work we develop a simple yet powerful framework, whose key idea is to select a subset of training examples from the unlabeled data when performing existing SSL methods so that only the unlabeled examples with pseudo labels related to the labeled data will be used to train models. The selection is performed at each updating iteration by only keeping the examples whose losses are smaller than a given threshold that is dynamically adjusted through the iteration. Our proposed approach, Dash, enjoys its adaptivity in terms of unlabeled data selection and its theoretical guarantee. Specifically, we theoretically establish the convergence rate of Dash from the view of non-convex optimization. 
Finally, we empirically demonstrate the effectiveness of the proposed method in comparison with state-of-the-art over benchmarks.}\n}", "pdf": "http://proceedings.mlr.press/v139/xu21e/xu21e.pdf", "supp": "", "pdf_size": 1820220, "gs_citation": 314, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9713529367835707264&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Machine Intelligence Technology, Alibaba Group; Machine Intelligence Technology, Alibaba Group; Machine Intelligence Technology, Alibaba Group; Machine Intelligence Technology, Alibaba Group; National Key Laboratory for Novel Software Technology, Nanjing University; Machine Intelligence Technology, Alibaba Group; Machine Intelligence Technology, Alibaba Group; Machine Intelligence Technology, Alibaba Group", "aff_domain": "alibaba-inc.com; ; ; ;lamda.nju.edu.cn; ; ; ", "email": "alibaba-inc.com; ; ; ;lamda.nju.edu.cn; ; ; ", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/xu21e.html", "aff_unique_index": "0;0;0;0;1;0;0;0", "aff_unique_norm": "Alibaba Group;Nanjing University", "aff_unique_dep": "Machine Intelligence Technology;National Key Laboratory for Novel Software Technology", "aff_unique_url": "https://www.alibaba.com;http://www.nju.edu.cn", "aff_unique_abbr": "Alibaba;Nanjing University", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0;0;0", "aff_country_unique": "China" }, { "title": "Data Augmentation for Meta-Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10493", "id": "10493", "proceeding": "http://proceedings.mlr.press/v139/ni21a.html", "slides": "/media/icml-2021/Slides/10493.pdf", "author_site": "Renkun Ni, Micah Goldblum, Amr Sharaf, Kezhi Kong, Tom Goldstein", "author": "Renkun Ni; Micah Goldblum; Amr Sharaf; Kezhi Kong; Tom Goldstein", "abstract": "Conventional image classifiers are trained by randomly sampling mini-batches of images. To achieve state-of-the-art performance, practitioners use sophisticated data augmentation schemes to expand the amount of training data available for sampling. In contrast, meta-learning algorithms sample support data, query data, and tasks on each training step. In this complex sampling scenario, data augmentation can be used not only to expand the number of images available per class, but also to generate entirely new classes/tasks. We systematically dissect the meta-learning pipeline and investigate the distinct ways in which data augmentation can be integrated at both the image and class levels. Our proposed meta-specific data augmentation significantly improves the performance of meta-learners on few-shot classification benchmarks.", "bibtex": "@InProceedings{pmlr-v139-ni21a,\n title = \t {Data Augmentation for Meta-Learning},\n author = {Ni, Renkun and Goldblum, Micah and Sharaf, Amr and Kong, Kezhi and Goldstein, Tom},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8152--8161},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ni21a/ni21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ni21a.html},\n abstract = \t {Conventional image classifiers are trained by randomly sampling mini-batches of images. 
To achieve state-of-the-art performance, practitioners use sophisticated data augmentation schemes to expand the amount of training data available for sampling. In contrast, meta-learning algorithms sample support data, query data, and tasks on each training step. In this complex sampling scenario, data augmentation can be used not only to expand the number of images available per class, but also to generate entirely new classes/tasks. We systematically dissect the meta-learning pipeline and investigate the distinct ways in which data augmentation can be integrated at both the image and class levels. Our proposed meta-specific data augmentation significantly improves the performance of meta-learners on few-shot classification benchmarks.}\n}", "pdf": "http://proceedings.mlr.press/v139/ni21a/ni21a.pdf", "supp": "", "pdf_size": 798601, "gs_citation": 102, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2872867843367483483&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science, University of Maryland, College Park; Department of Computer Science, University of Maryland, College Park; Microsoft; Department of Computer Science, University of Maryland, College Park; Department of Computer Science, University of Maryland, College Park", "aff_domain": "cs.umd.edu; ; ; ; ", "email": "cs.umd.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/ni21a.html", "aff_unique_index": "0;0;1;0;0", "aff_unique_norm": "University of Maryland, College Park;Microsoft", "aff_unique_dep": "Department of Computer Science;Microsoft Corporation", "aff_unique_url": "https://www/umd.edu;https://www.microsoft.com", "aff_unique_abbr": "UMD;Microsoft", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "College Park;", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Data augmentation for deep learning based accelerated MRI reconstruction with limited data", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9591", "id": "9591", "proceeding": "http://proceedings.mlr.press/v139/fabian21a.html", "slides": "/media/icml-2021/Slides/9591.pdf", "author_site": "Zalan Fabian, Reinhard Heckel, Mahdi Soltanolkotabi", "author": "Zalan Fabian; Reinhard Heckel; Mahdi Soltanolkotabi", "abstract": "Deep neural networks have emerged as very successful tools for image restoration and reconstruction tasks. These networks are often trained end-to-end to directly reconstruct an image from a noisy or corrupted measurement of that image. To achieve state-of-the-art performance, training on large and diverse sets of images is considered critical. However, it is often difficult and/or expensive to collect large amounts of training images. Inspired by the success of Data Augmentation (DA) for classification problems, in this paper, we propose a pipeline for data augmentation for accelerated MRI reconstruction and study its effectiveness at reducing the required training data in a variety of settings. Our DA pipeline, MRAugment, is specifically designed to utilize the invariances present in medical imaging measurements as naive DA strategies that neglect the physics of the problem fail. Through extensive studies on multiple datasets we demonstrate that in the low-data regime DA prevents overfitting and can match or even surpass the state of the art while using significantly fewer training data, whereas in the high-data regime it has diminishing returns. 
Furthermore, our findings show that DA improves the robustness of the model against various shifts in the test distribution.", "bibtex": "@InProceedings{pmlr-v139-fabian21a,\n title = \t {Data augmentation for deep learning based accelerated MRI reconstruction with limited data},\n author = {Fabian, Zalan and Heckel, Reinhard and Soltanolkotabi, Mahdi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3057--3067},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fabian21a/fabian21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/fabian21a.html},\n abstract = \t {Deep neural networks have emerged as very successful tools for image restoration and reconstruction tasks. These networks are often trained end-to-end to directly reconstruct an image from a noisy or corrupted measurement of that image. To achieve state-of-the-art performance, training on large and diverse sets of images is considered critical. However, it is often difficult and/or expensive to collect large amounts of training images. Inspired by the success of Data Augmentation (DA) for classification problems, in this paper, we propose a pipeline for data augmentation for accelerated MRI reconstruction and study its effectiveness at reducing the required training data in a variety of settings. Our DA pipeline, MRAugment, is specifically designed to utilize the invariances present in medical imaging measurements as naive DA strategies that neglect the physics of the problem fail. Through extensive studies on multiple datasets we demonstrate that in the low-data regime DA prevents overfitting and can match or even surpass the state of the art while using significantly fewer training data, whereas in the high-data regime it has diminishing returns. 
Furthermore, our findings show that DA improves the robustness of the model against various shifts in the test distribution.}\n}", "pdf": "http://proceedings.mlr.press/v139/fabian21a/fabian21a.pdf", "supp": "", "pdf_size": 3490933, "gs_citation": 77, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=945611859735740790&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Department of Electrical and Computer Engineering, University of Southern California, Los Angeles, California, USA; Department of Electrical and Computer Engineering, Technical University of Munich, Munich, Germany + Department of Electrical and Computer Engineering, Rice University; Department of Electrical and Computer Engineering, University of Southern California, Los Angeles, California, USA", "aff_domain": "usc.edu;tum.de;usc.edu", "email": "usc.edu;tum.de;usc.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/fabian21a.html", "aff_unique_index": "0;1+2;0", "aff_unique_norm": "University of Southern California;Technical University of Munich;Rice University", "aff_unique_dep": "Department of Electrical and Computer Engineering;Department of Electrical and Computer Engineering;Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.usc.edu;https://www.tum.de;https://www.rice.edu", "aff_unique_abbr": "USC;TUM;Rice", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Los Angeles;Munich;", "aff_country_unique_index": "0;1+0;0", "aff_country_unique": "United States;Germany" }, { "title": "Data-Free Knowledge Distillation for Heterogeneous Federated Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10091", "id": "10091", "proceeding": "http://proceedings.mlr.press/v139/zhu21b.html", "slides": "", "author_site": "Zhuangdi Zhu, Junyuan Hong, Jiayu Zhou", "author": "Zhuangdi Zhu; Junyuan Hong; Jiayu Zhou", "abstract": "Federated Learning (FL) is a decentralized machine-learning paradigm, in which a global server iteratively averages the model parameters of local users without accessing their data. User heterogeneity has imposed significant challenges to FL, which can incur drifted global models that are slow to converge. Knowledge Distillation has recently emerged to tackle this issue, by refining the server model using aggregated knowledge from heterogeneous users, other than directly averaging their model parameters. This approach, however, depends on a proxy dataset, making it impractical unless such a prerequisite is satisfied. Moreover, the ensemble knowledge is not fully utilized to guide local model learning, which may in turn affect the quality of the aggregated model. Inspired by the prior art, we propose a data-free knowledge distillation approach to address heterogeneous FL, where the server learns a lightweight generator to ensemble user information in a data-free manner, which is then broadcasted to users, regulating local training using the learned knowledge as an inductive bias. 
Empirical studies powered by theoretical implications show that our approach facilitates FL with better generalization performance using fewer communication rounds, compared with the state-of-the-art.", "bibtex": "@InProceedings{pmlr-v139-zhu21b,\n title = \t {Data-Free Knowledge Distillation for Heterogeneous Federated Learning},\n author = {Zhu, Zhuangdi and Hong, Junyuan and Zhou, Jiayu},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12878--12889},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhu21b/zhu21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhu21b.html},\n abstract = \t {Federated Learning (FL) is a decentralized machine-learning paradigm, in which a global server iteratively averages the model parameters of local users without accessing their data. User heterogeneity has imposed significant challenges to FL, which can incur drifted global models that are slow to converge. Knowledge Distillation has recently emerged to tackle this issue, by refining the server model using aggregated knowledge from heterogeneous users, other than directly averaging their model parameters. This approach, however, depends on a proxy dataset, making it impractical unless such a prerequisite is satisfied. Moreover, the ensemble knowledge is not fully utilized to guide local model learning, which may in turn affect the quality of the aggregated model. Inspired by the prior art, we propose a data-free knowledge distillation approach to address heterogeneous FL, where the server learns a lightweight generator to ensemble user information in a data-free manner, which is then broadcasted to users, regulating local training using the learned knowledge as an inductive bias. 
Empirical studies powered by theoretical implications show that our approach facilitates FL with better generalization performance using fewer communication rounds, compared with the state-of-the-art.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhu21b/zhu21b.pdf", "supp": "", "pdf_size": 4134186, "gs_citation": 888, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7623989304932004124&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science and Engineering, Michigan State University, Michigan, USA; Department of Computer Science and Engineering, Michigan State University, Michigan, USA; Department of Computer Science and Engineering, Michigan State University, Michigan, USA", "aff_domain": "msu.edu; ;msu.edu", "email": "msu.edu; ;msu.edu", "github": "https://github.com/zhuangdizhu/FedGen", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zhu21b.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Michigan State University", "aff_unique_dep": "Department of Computer Science and Engineering", "aff_unique_url": "https://www.msu.edu", "aff_unique_abbr": "MSU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Michigan", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Data-driven Prediction of General Hamiltonian Dynamics via Learning Exactly-Symplectic Maps", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9377", "id": "9377", "proceeding": "http://proceedings.mlr.press/v139/chen21r.html", "slides": "", "author_site": "Renyi Chen, Molei Tao", "author": "Renyi Chen; Molei Tao", "abstract": "We consider the learning and prediction of nonlinear time series generated by a latent symplectic map. A special case is (not necessarily separable) Hamiltonian systems, whose solution flows give such symplectic maps. For this special case, both generic approaches based on learning the vector field of the latent ODE and specialized approaches based on learning the Hamiltonian that generates the vector field exist. Our method, however, is different as it does not rely on the vector field nor assume its existence; instead, it directly learns the symplectic evolution map in discrete time. Moreover, we do so by representing the symplectic map via a generating function, which we approximate by a neural network (hence the name GFNN). This way, our approximation of the evolution map is always \\emph{exactly} symplectic. This additional geometric structure allows the local prediction error at each step to accumulate in a controlled fashion, and we will prove, under reasonable assumptions, that the global prediction error grows at most \\emph{linearly} with long prediction time, which significantly improves an otherwise exponential growth. In addition, as a map-based and thus purely data-driven method, GFNN avoids two additional sources of inaccuracies common in vector-field based approaches, namely the error in approximating the vector field by finite difference of the data, and the error in numerical integration of the vector field for making predictions. 
Numerical experiments further demonstrate our claims.", "bibtex": "@InProceedings{pmlr-v139-chen21r,\n title = \t {Data-driven Prediction of General Hamiltonian Dynamics via Learning Exactly-Symplectic Maps},\n author = {Chen, Renyi and Tao, Molei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1717--1727},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21r/chen21r.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21r.html},\n abstract = \t {We consider the learning and prediction of nonlinear time series generated by a latent symplectic map. A special case is (not necessarily separable) Hamiltonian systems, whose solution flows give such symplectic maps. For this special case, both generic approaches based on learning the vector field of the latent ODE and specialized approaches based on learning the Hamiltonian that generates the vector field exist. Our method, however, is different as it does not rely on the vector field nor assume its existence; instead, it directly learns the symplectic evolution map in discrete time. Moreover, we do so by representing the symplectic map via a generating function, which we approximate by a neural network (hence the name GFNN). This way, our approximation of the evolution map is always \\emph{exactly} symplectic. This additional geometric structure allows the local prediction error at each step to accumulate in a controlled fashion, and we will prove, under reasonable assumptions, that the global prediction error grows at most \\emph{linearly} with long prediction time, which significantly improves an otherwise exponential growth. In addition, as a map-based and thus purely data-driven method, GFNN avoids two additional sources of inaccuracies common in vector-field based approaches, namely the error in approximating the vector field by finite difference of the data, and the error in numerical integration of the vector field for making predictions. 
Numerical experiments further demonstrate our claims.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21r/chen21r.pdf", "supp": "", "pdf_size": 2584476, "gs_citation": 64, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12560285856354081449&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "School of Mathematics, Georgia Institute of Technology, Atlanta, GA, USA; School of Mathematics, Georgia Institute of Technology, Atlanta, GA, USA", "aff_domain": "gatech.edu;gatech.edu", "email": "gatech.edu;gatech.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/chen21r.html", "aff_unique_index": "0;0", "aff_unique_norm": "Georgia Institute of Technology", "aff_unique_dep": "School of Mathematics", "aff_unique_url": "https://www.gatech.edu", "aff_unique_abbr": "Georgia Tech", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Atlanta", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Data-efficient Hindsight Off-policy Option Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10751", "id": "10751", "proceeding": "http://proceedings.mlr.press/v139/wulfmeier21a.html", "slides": "", "author_site": "Markus Wulfmeier, Dushyant Rao, Roland Hafner, Thomas Lampe, Abbas Abdolmaleki, Tim Hertweck, Michael Neunert, Dhruva Tirumala Bukkapatnam, Noah Siegel, Nicolas Heess, Martin Riedmiller", "author": "Markus Wulfmeier; Dushyant Rao; Roland Hafner; Thomas Lampe; Abbas Abdolmaleki; Tim Hertweck; Michael Neunert; Dhruva Tirumala; Noah Siegel; Nicolas Heess; Martin Riedmiller", "abstract": "We introduce Hindsight Off-policy Options (HO2), a data-efficient option learning algorithm. Given any trajectory, HO2 infers likely option choices and backpropagates through the dynamic programming inference procedure to robustly train all policy components off-policy and end-to-end. The approach outperforms existing option learning methods on common benchmarks. To better understand the option framework and disentangle benefits from both temporal and action abstraction, we evaluate ablations with flat policies and mixture policies with comparable optimization. The results highlight the importance of both types of abstraction as well as off-policy training and trust-region constraints, particularly in challenging, simulated 3D robot manipulation tasks from raw pixel inputs. Finally, we intuitively adapt the inference step to investigate the effect of increased temporal abstraction on training with pre-trained options and from scratch.", "bibtex": "@InProceedings{pmlr-v139-wulfmeier21a,\n title = \t {Data-efficient Hindsight Off-policy Option Learning},\n author = {Wulfmeier, Markus and Rao, Dushyant and Hafner, Roland and Lampe, Thomas and Abdolmaleki, Abbas and Hertweck, Tim and Neunert, Michael and Tirumala, Dhruva and Siegel, Noah and Heess, Nicolas and Riedmiller, Martin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11340--11350},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wulfmeier21a/wulfmeier21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/wulfmeier21a.html},\n abstract = \t {We introduce Hindsight Off-policy Options (HO2), a data-efficient option learning algorithm. 
Given any trajectory, HO2 infers likely option choices and backpropagates through the dynamic programming inference procedure to robustly train all policy components off-policy and end-to-end. The approach outperforms existing option learning methods on common benchmarks. To better understand the option framework and disentangle benefits from both temporal and action abstraction, we evaluate ablations with flat policies and mixture policies with comparable optimization. The results highlight the importance of both types of abstraction as well as off-policy training and trust-region constraints, particularly in challenging, simulated 3D robot manipulation tasks from raw pixel inputs. Finally, we intuitively adapt the inference step to investigate the effect of increased temporal abstraction on training with pre-trained options and from scratch.}\n}", "pdf": "http://proceedings.mlr.press/v139/wulfmeier21a/wulfmeier21a.pdf", "supp": "", "pdf_size": 3154511, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1296097676637165629&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom", "aff_domain": "google.com; ; ; ; ; ; ; ; ; ; ", "email": "google.com; ; ; ; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 11, "oa": "https://proceedings.mlr.press/v139/wulfmeier21a.html", "aff_unique_index": "0;0;0;0;0;0;0;0;0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "0;0;0;0;0;0;0;0;0;0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Dataset Condensation with Differentiable Siamese Augmentation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8609", "id": "8609", "proceeding": "http://proceedings.mlr.press/v139/zhao21a.html", "slides": "/media/icml-2021/Slides/8609.pdf", "author_site": "Bo Zhao, Hakan Bilen", "author": "Bo Zhao; Hakan Bilen", "abstract": "In many machine learning problems, large-scale datasets have become the de-facto standard to train state-of-the-art deep networks at the price of heavy computation load. In this paper, we focus on condensing large training sets into significantly smaller synthetic sets which can be used to train deep neural networks from scratch with minimum drop in performance. Inspired from the recent training set synthesis methods, we propose Differentiable Siamese Augmentation that enables effective use of data augmentation to synthesize more informative synthetic images and thus achieves better performance when training networks with augmentations. Experiments on multiple image classification benchmarks demonstrate that the proposed method obtains substantial gains over the state-of-the-art, 7% improvements on CIFAR10 and CIFAR100 datasets. We show with only less than 1% data that our method achieves 99.6%, 94.9%, 88.5%, 71.5% relative performance on MNIST, FashionMNIST, SVHN, CIFAR10 respectively. 
We also explore the use of our method in continual learning and neural architecture search, and show promising results.", "bibtex": "@InProceedings{pmlr-v139-zhao21a,\n title = \t {Dataset Condensation with Differentiable Siamese Augmentation},\n author = {Zhao, Bo and Bilen, Hakan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12674--12685},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhao21a/zhao21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhao21a.html},\n abstract = \t {In many machine learning problems, large-scale datasets have become the de-facto standard to train state-of-the-art deep networks at the price of heavy computation load. In this paper, we focus on condensing large training sets into significantly smaller synthetic sets which can be used to train deep neural networks from scratch with minimum drop in performance. Inspired from the recent training set synthesis methods, we propose Differentiable Siamese Augmentation that enables effective use of data augmentation to synthesize more informative synthetic images and thus achieves better performance when training networks with augmentations. Experiments on multiple image classification benchmarks demonstrate that the proposed method obtains substantial gains over the state-of-the-art, 7% improvements on CIFAR10 and CIFAR100 datasets. We show with only less than 1% data that our method achieves 99.6%, 94.9%, 88.5%, 71.5% relative performance on MNIST, FashionMNIST, SVHN, CIFAR10 respectively. We also explore the use of our method in continual learning and neural architecture search, and show promising results.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhao21a/zhao21a.pdf", "supp": "", "pdf_size": 3342855, "gs_citation": 375, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14949848395042620640&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "School of Informatics, The University of Edinburgh, UK; School of Informatics, The University of Edinburgh, UK", "aff_domain": "ed.ac.uk;ed.ac.uk", "email": "ed.ac.uk;ed.ac.uk", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/zhao21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Edinburgh", "aff_unique_dep": "School of Informatics", "aff_unique_url": "https://www.ed.ac.uk", "aff_unique_abbr": "Edinburgh", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Edinburgh", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "title": "Dataset Dynamics via Gradient Flows in Probability Space", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9503", "id": "9503", "proceeding": "http://proceedings.mlr.press/v139/alvarez-melis21a.html", "slides": "", "author_site": "David Alvarez-Melis, Nicolo Fusi", "author": "David Alvarez-Melis; Nicol\u00f2 Fusi", "abstract": "Various machine learning tasks, from generative modeling to domain adaptation, revolve around the concept of dataset transformation and manipulation. While various methods exist for transforming unlabeled datasets, principled methods to do so for labeled (e.g., classification) datasets are missing. 
In this work, we propose a novel framework for dataset transformation, which we cast as optimization over data-generating joint probability distributions. We approach this class of problems through Wasserstein gradient flows in probability space, and derive practical and efficient particle-based methods for a flexible but well-behaved class of objective functions. Through various experiments, we show that this framework can be used to impose constraints on classification datasets, adapt them for transfer learning, or to re-purpose fixed or black-box models to classify {\u2014}with high accuracy{\u2014} previously unseen datasets.", "bibtex": "@InProceedings{pmlr-v139-alvarez-melis21a,\n title = \t {Dataset Dynamics via Gradient Flows in Probability Space},\n author = {Alvarez-Melis, David and Fusi, Nicol\\`o},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {219--230},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/alvarez-melis21a/alvarez-melis21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/alvarez-melis21a.html},\n abstract = \t {Various machine learning tasks, from generative modeling to domain adaptation, revolve around the concept of dataset transformation and manipulation. While various methods exist for transforming unlabeled datasets, principled methods to do so for labeled (e.g., classification) datasets are missing. In this work, we propose a novel framework for dataset transformation, which we cast as optimization over data-generating joint probability distributions. We approach this class of problems through Wasserstein gradient flows in probability space, and derive practical and efficient particle-based methods for a flexible but well-behaved class of objective functions. 
Through various experiments, we show that this framework can be used to impose constraints on classification datasets, adapt them for transfer learning, or to re-purpose fixed or black-box models to classify {\u2014}with high accuracy{\u2014} previously unseen datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/alvarez-melis21a/alvarez-melis21a.pdf", "supp": "", "pdf_size": 5294300, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5504171616762225423&as_sdt=2005&sciodt=0,5&hl=en&oe=ASCII", "gs_version_total": 6, "aff": "Microsoft Research; Microsoft Research", "aff_domain": "microsoft.com; ", "email": "microsoft.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/alvarez-melis21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Microsoft", "aff_unique_dep": "Microsoft Research", "aff_unique_url": "https://www.microsoft.com/en-us/research", "aff_unique_abbr": "MSR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Debiasing Model Updates for Improving Personalized Federated Training", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9599", "id": "9599", "proceeding": "http://proceedings.mlr.press/v139/acar21a.html", "slides": "", "author_site": "Durmus Alp Emre Acar, Yue Zhao, Ruizhao Zhu, Ramon Matas, Matthew Mattina, Paul Whatmough, Venkatesh Saligrama", "author": "Durmus Alp Emre Acar; Yue Zhao; Ruizhao Zhu; Ramon Matas; Matthew Mattina; Paul Whatmough; Venkatesh Saligrama", "abstract": "We propose a novel method for federated learning that is customized specifically to the objective of a given edge device. In our proposed method, a server trains a global meta-model by collaborating with devices without actually sharing data. The trained global meta-model is then personalized locally by each device to meet its specific objective. Different from the conventional federated learning setting, training customized models for each device is hindered by both the inherent data biases of the various devices, as well as the requirements imposed by the federated architecture. We propose gradient correction methods leveraging prior works, and explicitly de-bias the meta-model in the distributed heterogeneous data setting to learn personalized device models. We present convergence guarantees of our method for strongly convex, convex and nonconvex meta objectives. We empirically evaluate the performance of our method on benchmark datasets and demonstrate significant communication savings.", "bibtex": "@InProceedings{pmlr-v139-acar21a,\n title = \t {Debiasing Model Updates for Improving Personalized Federated Training},\n author = {Acar, Durmus Alp Emre and Zhao, Yue and Zhu, Ruizhao and Matas, Ramon and Mattina, Matthew and Whatmough, Paul and Saligrama, Venkatesh},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {21--31},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/acar21a/acar21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/acar21a.html},\n abstract = \t {We propose a novel method for federated learning that is customized specifically to the objective of a given edge device. 
In our proposed method, a server trains a global meta-model by collaborating with devices without actually sharing data. The trained global meta-model is then personalized locally by each device to meet its specific objective. Different from the conventional federated learning setting, training customized models for each device is hindered by both the inherent data biases of the various devices, as well as the requirements imposed by the federated architecture. We propose gradient correction methods leveraging prior works, and explicitly de-bias the meta-model in the distributed heterogeneous data setting to learn personalized device models. We present convergence guarantees of our method for strongly convex, convex and nonconvex meta objectives. We empirically evaluate the performance of our method on benchmark datasets and demonstrate significant communication savings.}\n}", "pdf": "http://proceedings.mlr.press/v139/acar21a/acar21a.pdf", "supp": "", "pdf_size": 414895, "gs_citation": 86, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10771581832158034135&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "Boston University, Boston, MA; Arm ML Research Lab, Boston, MA; Boston University, Boston, MA; Arm ML Research Lab, Boston, MA; Arm ML Research Lab, Boston, MA; Arm ML Research Lab, Boston, MA; Boston University, Boston, MA", "aff_domain": "bu.edu; ; ; ; ; ;bu.edu", "email": "bu.edu; ; ; ; ; ;bu.edu", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/acar21a.html", "aff_unique_index": "0;1;0;1;1;1;0", "aff_unique_norm": "Boston University;Arm ML Research Lab", "aff_unique_dep": ";ML Research", "aff_unique_url": "https://www.bu.edu;https://www.arm.com/research", "aff_unique_abbr": "BU;Arm ML", "aff_campus_unique_index": "0;0;0;0;0;0;0", "aff_campus_unique": "Boston", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Debiasing a First-order Heuristic for Approximate Bi-level Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9027", "id": "9027", "proceeding": "http://proceedings.mlr.press/v139/likhosherstov21a.html", "slides": "", "author_site": "Valerii Likhosherstov, Xingyou Song, Krzysztof Choromanski, Jared Quincy Davis, Adrian Weller", "author": "Valerii Likhosherstov; Xingyou Song; Krzysztof Choromanski; Jared Q Davis; Adrian Weller", "abstract": "Approximate bi-level optimization (ABLO) consists of (outer-level) optimization problems, involving numerical (inner-level) optimization loops. While ABLO has many applications across deep learning, it suffers from time and memory complexity proportional to the length $r$ of its inner optimization loop. To address this complexity, an earlier first-order method (FOM) was proposed as a heuristic which omits second derivative terms, yielding significant speed gains and requiring only constant memory. Despite FOM\u2019s popularity, there is a lack of theoretical understanding of its convergence properties. We contribute by theoretically characterizing FOM\u2019s gradient bias under mild assumptions. We further demonstrate a rich family of examples where FOM-based SGD does not converge to a stationary point of the ABLO objective. We address this concern by proposing an unbiased FOM (UFOM) enjoying constant memory complexity as a function of $r$. We characterize the introduced time-variance tradeoff, demonstrate convergence bounds, and find an optimal UFOM for a given ABLO problem. 
Finally, we propose an efficient adaptive UFOM scheme.", "bibtex": "@InProceedings{pmlr-v139-likhosherstov21a,\n title = \t {Debiasing a First-order Heuristic for Approximate Bi-level Optimization},\n author = {Likhosherstov, Valerii and Song, Xingyou and Choromanski, Krzysztof and Davis, Jared Q and Weller, Adrian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6621--6630},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/likhosherstov21a/likhosherstov21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/likhosherstov21a.html},\n abstract = \t {Approximate bi-level optimization (ABLO) consists of (outer-level) optimization problems, involving numerical (inner-level) optimization loops. While ABLO has many applications across deep learning, it suffers from time and memory complexity proportional to the length $r$ of its inner optimization loop. To address this complexity, an earlier first-order method (FOM) was proposed as a heuristic which omits second derivative terms, yielding significant speed gains and requiring only constant memory. Despite FOM\u2019s popularity, there is a lack of theoretical understanding of its convergence properties. We contribute by theoretically characterizing FOM\u2019s gradient bias under mild assumptions. We further demonstrate a rich family of examples where FOM-based SGD does not converge to a stationary point of the ABLO objective. We address this concern by proposing an unbiased FOM (UFOM) enjoying constant memory complexity as a function of $r$. We characterize the introduced time-variance tradeoff, demonstrate convergence bounds, and find an optimal UFOM for a given ABLO problem. 
Finally, we propose an efficient adaptive UFOM scheme.}\n}", "pdf": "http://proceedings.mlr.press/v139/likhosherstov21a/likhosherstov21a.pdf", "supp": "", "pdf_size": 6365233, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11037305189679806516&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "University of Cambridge; Google Research, Brain Team; Columbia University; Deepmind; Stanford University + The Alan Turing Institute", "aff_domain": "cam.ac.uk; ; ; ; ", "email": "cam.ac.uk; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/likhosherstov21a.html", "aff_unique_index": "0;1;2;3;4+5", "aff_unique_norm": "University of Cambridge;Google;Columbia University;DeepMind;Stanford University;Alan Turing Institute", "aff_unique_dep": ";Google Research;;;;", "aff_unique_url": "https://www.cam.ac.uk;https://research.google;https://www.columbia.edu;https://deepmind.com;https://www.stanford.edu;https://www.turing.ac.uk", "aff_unique_abbr": "Cambridge;Google;Columbia;DeepMind;Stanford;ATI", "aff_campus_unique_index": "0;1;3", "aff_campus_unique": "Cambridge;Mountain View;;Stanford", "aff_country_unique_index": "0;1;1;0;1+0", "aff_country_unique": "United Kingdom;United States" }, { "title": "Decentralized Riemannian Gradient Descent on the Stiefel Manifold", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8405", "id": "8405", "proceeding": "http://proceedings.mlr.press/v139/chen21g.html", "slides": "/media/icml-2021/Slides/8405.pdf", "author_site": "Shixiang Chen, Alfredo Garcia, Mingyi Hong, Shahin Shahrampour", "author": "Shixiang Chen; Alfredo Garcia; Mingyi Hong; Shahin Shahrampour", "abstract": "We consider a distributed non-convex optimization where a network of agents aims at minimizing a global function over the Stiefel manifold. The global function is represented as a finite sum of smooth local functions, where each local function is associated with one agent and agents communicate with each other over an undirected connected graph. The problem is non-convex as local functions are possibly non-convex (but smooth) and the Stiefel manifold is a non-convex set. We present a decentralized Riemannian stochastic gradient method (DRSGD) with the convergence rate of $\\mathcal{O}(1/\\sqrt{K})$ to a stationary point. To have exact convergence with constant stepsize, we also propose a decentralized Riemannian gradient tracking algorithm (DRGTA) with the convergence rate of $\\mathcal{O}(1/K)$ to a stationary point. We use multi-step consensus to preserve the iteration in the local (consensus) region. 
DRGTA is the first decentralized algorithm with exact convergence for distributed optimization on the Stiefel manifold.", "bibtex": "@InProceedings{pmlr-v139-chen21g,\n title = \t {Decentralized Riemannian Gradient Descent on the Stiefel Manifold},\n author = {Chen, Shixiang and Garcia, Alfredo and Hong, Mingyi and Shahrampour, Shahin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1594--1605},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21g/chen21g.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21g.html},\n abstract = \t {We consider a distributed non-convex optimization where a network of agents aims at minimizing a global function over the Stiefel manifold. The global function is represented as a finite sum of smooth local functions, where each local function is associated with one agent and agents communicate with each other over an undirected connected graph. The problem is non-convex as local functions are possibly non-convex (but smooth) and the Stiefel manifold is a non-convex set. We present a decentralized Riemannian stochastic gradient method (DRSGD) with the convergence rate of $\\mathcal{O}(1/\\sqrt{K})$ to a stationary point. To have exact convergence with constant stepsize, we also propose a decentralized Riemannian gradient tracking algorithm (DRGTA) with the convergence rate of $\\mathcal{O}(1/K)$ to a stationary point. We use multi-step consensus to preserve the iteration in the local (consensus) region. DRGTA is the first decentralized algorithm with exact convergence for distributed optimization on the Stiefel manifold.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21g/chen21g.pdf", "supp": "", "pdf_size": 4049502, "gs_citation": 51, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10235515881899160189&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "The Wm Michael Barnes '64 Department of Industrial and Systems Engineering, Texas A&M University, College Station, TX 77843, USA; The Wm Michael Barnes '64 Department of Industrial and Systems Engineering, Texas A&M University, College Station, TX 77843, USA; The Department of Electrical and Computer Engineering, University of Minnesota, Minneapolis, MN 55455, USA; The Wm Michael Barnes '64 Department of Industrial and Systems Engineering, Texas A&M University, College Station, TX 77843, USA", "aff_domain": "tamu.edu;tamu.edu;umn.edu;tamu.edu", "email": "tamu.edu;tamu.edu;umn.edu;tamu.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/chen21g.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "Texas A&M University;University of Minnesota", "aff_unique_dep": "Department of Industrial and Systems Engineering;Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.tamu.edu;https://www.umn.edu", "aff_unique_abbr": "TAMU;UMN", "aff_campus_unique_index": "0;0;1;0", "aff_campus_unique": "College Station;Minneapolis", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Decentralized Single-Timescale Actor-Critic on Zero-Sum Two-Player Stochastic Games", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9619", "id": "9619", "proceeding": "http://proceedings.mlr.press/v139/guo21a.html", "slides": 
"", "author_site": "Hongyi Guo, Zuyue Fu, Zhuoran Yang, Zhaoran Wang", "author": "Hongyi Guo; Zuyue Fu; Zhuoran Yang; Zhaoran Wang", "abstract": "We study the global convergence and global optimality of the actor-critic algorithm applied for the zero-sum two-player stochastic games in a decentralized manner. We focus on the single-timescale setting where the critic is updated by applying the Bellman operator only once and the actor is updated by policy gradient with the information from the critic. Our algorithm is in a decentralized manner, as we assume that each player has no access to the actions of the other one, which, in a way, protects the privacy of both players. Moreover, we consider linear function approximations for both actor and critic, and we prove that the sequence of joint policy generated by our decentralized linear algorithm converges to the minimax equilibrium at a sublinear rate \\(\\cO(\\sqrt{K})\\), where \\(K\\){is} the number of iterations. To the best of our knowledge, we establish the global optimality and convergence of decentralized actor-critic algorithm on zero-sum two-player stochastic games with linear function approximations for the first time.", "bibtex": "@InProceedings{pmlr-v139-guo21a,\n title = \t {Decentralized Single-Timescale Actor-Critic on Zero-Sum Two-Player Stochastic Games},\n author = {Guo, Hongyi and Fu, Zuyue and Yang, Zhuoran and Wang, Zhaoran},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3899--3909},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/guo21a/guo21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/guo21a.html},\n abstract = \t {We study the global convergence and global optimality of the actor-critic algorithm applied for the zero-sum two-player stochastic games in a decentralized manner. We focus on the single-timescale setting where the critic is updated by applying the Bellman operator only once and the actor is updated by policy gradient with the information from the critic. Our algorithm is in a decentralized manner, as we assume that each player has no access to the actions of the other one, which, in a way, protects the privacy of both players. Moreover, we consider linear function approximations for both actor and critic, and we prove that the sequence of joint policy generated by our decentralized linear algorithm converges to the minimax equilibrium at a sublinear rate \\(\\cO(\\sqrt{K})\\), where \\(K\\){is} the number of iterations. 
To the best of our knowledge, we establish the global optimality and convergence of decentralized actor-critic algorithm on zero-sum two-player stochastic games with linear function approximations for the first time.}\n}", "pdf": "http://proceedings.mlr.press/v139/guo21a/guo21a.pdf", "supp": "", "pdf_size": 321936, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8581953991297272179&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Northwestern University; Northwestern University; Princeton University; Northwestern University", "aff_domain": "u.northwestern.edu;u.northwestern.edu;gmail.com;gmail.com", "email": "u.northwestern.edu;u.northwestern.edu;gmail.com;gmail.com", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/guo21a.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "Northwestern University;Princeton University", "aff_unique_dep": ";", "aff_unique_url": "https://www.northwestern.edu;https://www.princeton.edu", "aff_unique_abbr": "NU;Princeton", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Deciding What to Learn: A Rate-Distortion Approach", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10475", "id": "10475", "proceeding": "http://proceedings.mlr.press/v139/arumugam21a.html", "slides": "", "author_site": "Dilip Arumugam, Benjamin Van Roy", "author": "Dilip Arumugam; Benjamin Van Roy", "abstract": "Agents that learn to select optimal actions represent a prominent focus of the sequential decision-making literature. In the face of a complex environment or constraints on time and resources, however, aiming to synthesize such an optimal policy can become infeasible. These scenarios give rise to an important trade-off between the information an agent must acquire to learn and the sub-optimality of the resulting policy. While an agent designer has a preference for how this trade-off is resolved, existing approaches further require that the designer translate these preferences into a fixed learning target for the agent. In this work, leveraging rate-distortion theory, we automate this process such that the designer need only express their preferences via a single hyperparameter and the agent is endowed with the ability to compute its own learning targets that best achieve the desired trade-off. We establish a general bound on expected discounted regret for an agent that decides what to learn in this manner along with computational experiments that illustrate the expressiveness of designer preferences and even show improvements over Thompson sampling in identifying an optimal policy.", "bibtex": "@InProceedings{pmlr-v139-arumugam21a,\n title = \t {Deciding What to Learn: A Rate-Distortion Approach},\n author = {Arumugam, Dilip and Van Roy, Benjamin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {373--382},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/arumugam21a/arumugam21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/arumugam21a.html},\n abstract = \t {Agents that learn to select optimal actions represent a prominent focus of the sequential decision-making literature. 
In the face of a complex environment or constraints on time and resources, however, aiming to synthesize such an optimal policy can become infeasible. These scenarios give rise to an important trade-off between the information an agent must acquire to learn and the sub-optimality of the resulting policy. While an agent designer has a preference for how this trade-off is resolved, existing approaches further require that the designer translate these preferences into a fixed learning target for the agent. In this work, leveraging rate-distortion theory, we automate this process such that the designer need only express their preferences via a single hyperparameter and the agent is endowed with the ability to compute its own learning targets that best achieve the desired trade-off. We establish a general bound on expected discounted regret for an agent that decides what to learn in this manner along with computational experiments that illustrate the expressiveness of designer preferences and even show improvements over Thompson sampling in identifying an optimal policy.}\n}", "pdf": "http://proceedings.mlr.press/v139/arumugam21a/arumugam21a.pdf", "supp": "", "pdf_size": 1109908, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13750365440547008784&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Stanford University, California, USA; Stanford University, California, USA", "aff_domain": "cs.stanford.edu; ", "email": "cs.stanford.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/arumugam21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0", "aff_campus_unique": "California", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Decision-Making Under Selective Labels: Optimal Finite-Domain Policies and Beyond", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10109", "id": "10109", "proceeding": "http://proceedings.mlr.press/v139/wei21a.html", "slides": "/media/icml-2021/Slides/10109.pdf", "author": "Dennis Wei", "abstract": "Selective labels are a common feature of high-stakes decision-making applications, referring to the lack of observed outcomes under one of the possible decisions. This paper studies the learning of decision policies in the face of selective labels, in an online setting that balances learning costs against future utility. In the homogeneous case in which individuals\u2019 features are disregarded, the optimal decision policy is shown to be a threshold policy. The threshold becomes more stringent as more labels are collected; the rate at which this occurs is characterized. In the case of features drawn from a finite domain, the optimal policy consists of multiple homogeneous policies in parallel. For the general infinite-domain case, the homogeneous policy is extended by using a probabilistic classifier and bootstrapping to provide its inputs. 
In experiments on synthetic and real data, the proposed policies achieve consistently superior utility with no parameter tuning in the finite-domain case and lower parameter sensitivity in the general case.", "bibtex": "@InProceedings{pmlr-v139-wei21a,\n title = \t {Decision-Making Under Selective Labels: Optimal Finite-Domain Policies and Beyond},\n author = {Wei, Dennis},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11035--11046},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wei21a/wei21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/wei21a.html},\n abstract = \t {Selective labels are a common feature of high-stakes decision-making applications, referring to the lack of observed outcomes under one of the possible decisions. This paper studies the learning of decision policies in the face of selective labels, in an online setting that balances learning costs against future utility. In the homogeneous case in which individuals\u2019 features are disregarded, the optimal decision policy is shown to be a threshold policy. The threshold becomes more stringent as more labels are collected; the rate at which this occurs is characterized. In the case of features drawn from a finite domain, the optimal policy consists of multiple homogeneous policies in parallel. For the general infinite-domain case, the homogeneous policy is extended by using a probabilistic classifier and bootstrapping to provide its inputs. In experiments on synthetic and real data, the proposed policies achieve consistently superior utility with no parameter tuning in the finite-domain case and lower parameter sensitivity in the general case.}\n}", "pdf": "http://proceedings.mlr.press/v139/wei21a/wei21a.pdf", "supp": "", "pdf_size": 3915149, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2665750968391235282&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "IBM Research, Yorktown Heights, NY, USA", "aff_domain": "us.ibm.com", "email": "us.ibm.com", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v139/wei21a.html", "aff_unique_index": "0", "aff_unique_norm": "IBM", "aff_unique_dep": "IBM Research", "aff_unique_url": "https://www.ibm.com/research", "aff_unique_abbr": "IBM", "aff_campus_unique_index": "0", "aff_campus_unique": "Yorktown Heights", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "title": "Decomposable Submodular Function Minimization via Maximum Flow", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9123", "id": "9123", "proceeding": "http://proceedings.mlr.press/v139/axiotis21a.html", "slides": "", "author_site": "Kyriakos Axiotis, Adam Karczmarz, Anish Mukherjee, Piotr Sankowski, Adrian Vladu", "author": "Kyriakos Axiotis; Adam Karczmarz; Anish Mukherjee; Piotr Sankowski; Adrian Vladu", "abstract": "This paper bridges discrete and continuous optimization approaches for decomposable submodular function minimization, in both the standard and parametric settings. We provide improved running times for this problem by reducing it to a number of calls to a maximum flow oracle. 
When each function in the decomposition acts on O(1) elements of the ground set V and is polynomially bounded, our running time is up to polylogarithmic factors equal to that of solving maximum flow in a sparse graph with O(|V|) vertices and polynomial integral capacities. We achieve this by providing a simple iterative method which can optimize to high precision any convex function defined on the submodular base polytope, provided we can efficiently minimize it on the base polytope corresponding to the cut function of a certain graph that we construct. We solve this minimization problem by lifting the solutions of a parametric cut problem, which we obtain via a new efficient combinatorial reduction to maximum flow. This reduction is of independent interest and implies some previously unknown bounds for the parametric minimum s,t-cut problem in multiple settings.", "bibtex": "@InProceedings{pmlr-v139-axiotis21a,\n title = \t {Decomposable Submodular Function Minimization via Maximum Flow},\n author = {Axiotis, Kyriakos and Karczmarz, Adam and Mukherjee, Anish and Sankowski, Piotr and Vladu, Adrian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {446--456},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/axiotis21a/axiotis21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/axiotis21a.html},\n abstract = \t {This paper bridges discrete and continuous optimization approaches for decomposable submodular function minimization, in both the standard and parametric settings. We provide improved running times for this problem by reducing it to a number of calls to a maximum flow oracle. When each function in the decomposition acts on O(1) elements of the ground set V and is polynomially bounded, our running time is up to polylogarithmic factors equal to that of solving maximum flow in a sparse graph with O(|V|) vertices and polynomial integral capacities. We achieve this by providing a simple iterative method which can optimize to high precision any convex function defined on the submodular base polytope, provided we can efficiently minimize it on the base polytope corresponding to the cut function of a certain graph that we construct. We solve this minimization problem by lifting the solutions of a parametric cut problem, which we obtain via a new efficient combinatorial reduction to maximum flow. 
This reduction is of independent interest and implies some previously unknown bounds for the parametric minimum s,t-cut problem in multiple settings.}\n}", "pdf": "http://proceedings.mlr.press/v139/axiotis21a/axiotis21a.pdf", "supp": "", "pdf_size": 411338, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1315278567944283190&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "MIT; University of Warsaw; University of Warsaw; IDEAS NCBR+MIM Solutions; CNRS+IRIF, Universit\u00e9 de Paris", "aff_domain": "mit.edu; ; ;mimuw.edu.pl;irif.fr", "email": "mit.edu; ; ;mimuw.edu.pl;irif.fr", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/axiotis21a.html", "aff_unique_index": "0;1;1;2+3;4+5", "aff_unique_norm": "Massachusetts Institute of Technology;University of Warsaw;Institute for Development, Economic Analysis, and Simulation (IDEAS);MIM Solutions;Centre National de la Recherche Scientifique;Universit\u00e9 de Paris", "aff_unique_dep": ";;;;;IRIF", "aff_unique_url": "https://web.mit.edu;https://www.uw.edu.pl;https://www.ideas-ncbr.gov.pl;;https://www.cnrs.fr;https://www.univ-paris-diderot.fr", "aff_unique_abbr": "MIT;UW;IDEAS;;CNRS;UP", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0;1;1;1;3+3", "aff_country_unique": "United States;Poland;;France" }, { "title": "Decomposed Mutual Information Estimation for Contrastive Representation Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9751", "id": "9751", "proceeding": "http://proceedings.mlr.press/v139/sordoni21a.html", "slides": "/media/icml-2021/Slides/9751_Bq8rUx0.pdf", "author_site": "Alessandro Sordoni, Nouha Dziri, Hannes Schulz, Geoff Gordon, Philip Bachman, Remi Tachet des Combes", "author": "Alessandro Sordoni; Nouha Dziri; Hannes Schulz; Geoff Gordon; Philip Bachman; Remi Tachet Des Combes", "abstract": "Recent contrastive representation learning methods rely on estimating mutual information (MI) between multiple views of an underlying context. E.g., we can derive multiple views of a given image by applying data augmentation, or we can split a sequence into views comprising the past and future of some step in the sequence. Contrastive lower bounds on MI are easy to optimize, but have a strong underestimation bias when estimating large amounts of MI. We propose decomposing the full MI estimation problem into a sum of smaller estimation problems by splitting one of the views into progressively more informed subviews and by applying the chain rule on MI between the decomposed views. This expression contains a sum of unconditional and conditional MI terms, each measuring modest chunks of the total MI, which facilitates approximation via contrastive bounds. To maximize the sum, we formulate a contrastive lower bound on the conditional MI which can be approximated efficiently. We refer to our general approach as Decomposed Estimation of Mutual Information (DEMI). 
We show that DEMI can capture a larger amount of MI than standard non-decomposed contrastive bounds in a synthetic setting, and learns better representations in a vision domain and for dialogue generation.", "bibtex": "@InProceedings{pmlr-v139-sordoni21a,\n title = \t {Decomposed Mutual Information Estimation for Contrastive Representation Learning},\n author = {Sordoni, Alessandro and Dziri, Nouha and Schulz, Hannes and Gordon, Geoff and Bachman, Philip and Combes, Remi Tachet Des},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9859--9869},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sordoni21a/sordoni21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/sordoni21a.html},\n abstract = \t {Recent contrastive representation learning methods rely on estimating mutual information (MI) between multiple views of an underlying context. E.g., we can derive multiple views of a given image by applying data augmentation, or we can split a sequence into views comprising the past and future of some step in the sequence. Contrastive lower bounds on MI are easy to optimize, but have a strong underestimation bias when estimating large amounts of MI. We propose decomposing the full MI estimation problem into a sum of smaller estimation problems by splitting one of the views into progressively more informed subviews and by applying the chain rule on MI between the decomposed views. This expression contains a sum of unconditional and conditional MI terms, each measuring modest chunks of the total MI, which facilitates approximation via contrastive bounds. To maximize the sum, we formulate a contrastive lower bound on the conditional MI which can be approximated efficiently. We refer to our general approach as Decomposed Estimation of Mutual Information (DEMI). 
We show that DEMI can capture a larger amount of MI than standard non-decomposed contrastive bounds in a synthetic setting, and learns better representations in a vision domain and for dialogue generation.}\n}", "pdf": "http://proceedings.mlr.press/v139/sordoni21a/sordoni21a.pdf", "supp": "", "pdf_size": 807387, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11074898973258655358&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Microsoft Research; University of Alberta; Microsoft Research; Microsoft Research; Microsoft Research; Microsoft Research", "aff_domain": "microsoft.com;cs.ualberta.ca; ; ; ; ", "email": "microsoft.com;cs.ualberta.ca; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/sordoni21a.html", "aff_unique_index": "0;1;0;0;0;0", "aff_unique_norm": "Microsoft;University of Alberta", "aff_unique_dep": "Microsoft Research;", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://www.ualberta.ca", "aff_unique_abbr": "MSR;UAlberta", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0;0;0;0", "aff_country_unique": "United States;Canada" }, { "title": "Decoupling Exploration and Exploitation for Meta-Reinforcement Learning without Sacrifices", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8991", "id": "8991", "proceeding": "http://proceedings.mlr.press/v139/liu21s.html", "slides": "/media/icml-2021/Slides/8991.pdf", "author_site": "Evan Liu, Aditi Raghunathan, Percy Liang, Chelsea Finn", "author": "Evan Z Liu; Aditi Raghunathan; Percy Liang; Chelsea Finn", "abstract": "The goal of meta-reinforcement learning (meta-RL) is to build agents that can quickly learn new tasks by leveraging prior experience on related tasks. Learning a new task often requires both exploring to gather task-relevant information and exploiting this information to solve the task. In principle, optimal exploration and exploitation can be learned end-to-end by simply maximizing task performance. However, such meta-RL approaches struggle with local optima due to a chicken-and-egg problem: learning to explore requires good exploitation to gauge the exploration\u2019s utility, but learning to exploit requires information gathered via exploration. Optimizing separate objectives for exploration and exploitation can avoid this problem, but prior meta-RL exploration objectives yield suboptimal policies that gather information irrelevant to the task. We alleviate both concerns by constructing an exploitation objective that automatically identifies task-relevant information and an exploration objective to recover only this information. This avoids local optima in end-to-end training, without sacrificing optimal exploration. Empirically, DREAM substantially outperforms existing approaches on complex meta-RL problems, such as sparse-reward 3D visual navigation. 
Videos of DREAM: https://ezliu.github.io/dream/", "bibtex": "@InProceedings{pmlr-v139-liu21s,\n title = \t {Decoupling Exploration and Exploitation for Meta-Reinforcement Learning without Sacrifices},\n author = {Liu, Evan Z and Raghunathan, Aditi and Liang, Percy and Finn, Chelsea},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6925--6935},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21s/liu21s.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21s.html},\n abstract = \t {The goal of meta-reinforcement learning (meta-RL) is to build agents that can quickly learn new tasks by leveraging prior experience on related tasks. Learning a new task often requires both exploring to gather task-relevant information and exploiting this information to solve the task. In principle, optimal exploration and exploitation can be learned end-to-end by simply maximizing task performance. However, such meta-RL approaches struggle with local optima due to a chicken-and-egg problem: learning to explore requires good exploitation to gauge the exploration\u2019s utility, but learning to exploit requires information gathered via exploration. Optimizing separate objectives for exploration and exploitation can avoid this problem, but prior meta-RL exploration objectives yield suboptimal policies that gather information irrelevant to the task. We alleviate both concerns by constructing an exploitation objective that automatically identifies task-relevant information and an exploration objective to recover only this information. This avoids local optima in end-to-end training, without sacrificing optimal exploration. Empirically, DREAM substantially outperforms existing approaches on complex meta-RL problems, such as sparse-reward 3D visual navigation. 
Videos of DREAM: https://ezliu.github.io/dream/}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21s/liu21s.pdf", "supp": "", "pdf_size": 2928107, "gs_citation": 90, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=443689404393328118&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Department of Computer Science, Stanford University", "aff_domain": "cs.stanford.edu; ; ; ", "email": "cs.stanford.edu; ; ; ", "github": "", "project": "https://ezliu.github.io/dream/", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/liu21s.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Decoupling Representation Learning from Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10141", "id": "10141", "proceeding": "http://proceedings.mlr.press/v139/stooke21a.html", "slides": "", "author_site": "Adam Stooke, Kimin Lee, Pieter Abbeel, Michael Laskin", "author": "Adam Stooke; Kimin Lee; Pieter Abbeel; Michael Laskin", "abstract": "In an effort to overcome limitations of reward-driven feature learning in deep reinforcement learning (RL) from images, we propose decoupling representation learning from policy learning. To this end, we introduce a new unsupervised learning (UL) task, called Augmented Temporal Contrast (ATC), which trains a convolutional encoder to associate pairs of observations separated by a short time difference, under image augmentations and using a contrastive loss. In online RL experiments, we show that training the encoder exclusively using ATC matches or outperforms end-to-end RL in most environments. Additionally, we benchmark several leading UL algorithms by pre-training encoders on expert demonstrations and using them, with weights frozen, in RL agents; we find that agents using ATC-trained encoders outperform all others. We also train multi-task encoders on data from multiple environments and show generalization to different downstream RL tasks. Finally, we ablate components of ATC, and introduce a new data augmentation to enable replay of (compressed) latent images from pre-trained encoders when RL requires augmentation. 
Our experiments span visually diverse RL benchmarks in DeepMind Control, DeepMind Lab, and Atari, and our complete code is available at \\url{https://github.com/astooke/rlpyt/tree/master/rlpyt/ul}.", "bibtex": "@InProceedings{pmlr-v139-stooke21a,\n title = \t {Decoupling Representation Learning from Reinforcement Learning},\n author = {Stooke, Adam and Lee, Kimin and Abbeel, Pieter and Laskin, Michael},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9870--9879},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/stooke21a/stooke21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/stooke21a.html},\n abstract = \t {In an effort to overcome limitations of reward-driven feature learning in deep reinforcement learning (RL) from images, we propose decoupling representation learning from policy learning. To this end, we introduce a new unsupervised learning (UL) task, called Augmented Temporal Contrast (ATC), which trains a convolutional encoder to associate pairs of observations separated by a short time difference, under image augmentations and using a contrastive loss. In online RL experiments, we show that training the encoder exclusively using ATC matches or outperforms end-to-end RL in most environments. Additionally, we benchmark several leading UL algorithms by pre-training encoders on expert demonstrations and using them, with weights frozen, in RL agents; we find that agents using ATC-trained encoders outperform all others. We also train multi-task encoders on data from multiple environments and show generalization to different downstream RL tasks. Finally, we ablate components of ATC, and introduce a new data augmentation to enable replay of (compressed) latent images from pre-trained encoders when RL requires augmentation. 
Our experiments span visually diverse RL benchmarks in DeepMind Control, DeepMind Lab, and Atari, and our complete code is available at \\url{https://github.com/astooke/rlpyt/tree/master/rlpyt/ul}.}\n}", "pdf": "http://proceedings.mlr.press/v139/stooke21a/stooke21a.pdf", "supp": "", "pdf_size": 2112724, "gs_citation": 403, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4351064812627090102&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "University of California, Berkeley; University of California, Berkeley; University of California, Berkeley; University of California, Berkeley", "aff_domain": "berkeley.edu; ; ;berkeley.edu", "email": "berkeley.edu; ; ;berkeley.edu", "github": "https://github.com/astooke/rlpyt/tree/master/rlpyt/ul", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/stooke21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Decoupling Value and Policy for Generalization in Reinforcement Learning", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9343", "id": "9343", "proceeding": "http://proceedings.mlr.press/v139/raileanu21a.html", "slides": "/media/icml-2021/Slides/9343.pdf", "author_site": "Roberta Raileanu, Rob Fergus", "author": "Roberta Raileanu; Rob Fergus", "abstract": "Standard deep reinforcement learning algorithms use a shared representation for the policy and value function, especially when training directly from images. However, we argue that more information is needed to accurately estimate the value function than to learn the optimal policy. Consequently, the use of a shared representation for the policy and value function can lead to overfitting. To alleviate this problem, we propose two approaches which are combined to create IDAAC: Invariant Decoupled Advantage Actor-Critic. First, IDAAC decouples the optimization of the policy and value function, using separate networks to model them. Second, it introduces an auxiliary loss which encourages the representation to be invariant to task-irrelevant properties of the environment. IDAAC shows good generalization to unseen environments, achieving a new state-of-the-art on the Procgen benchmark and outperforming popular methods on DeepMind Control tasks with distractors. Our implementation is available at https://github.com/rraileanu/idaac.", "bibtex": "@InProceedings{pmlr-v139-raileanu21a,\n title = \t {Decoupling Value and Policy for Generalization in Reinforcement Learning},\n author = {Raileanu, Roberta and Fergus, Rob},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8787--8798},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/raileanu21a/raileanu21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/raileanu21a.html},\n abstract = \t {Standard deep reinforcement learning algorithms use a shared representation for the policy and value function, especially when training directly from images. 
However, we argue that more information is needed to accurately estimate the value function than to learn the optimal policy. Consequently, the use of a shared representation for the policy and value function can lead to overfitting. To alleviate this problem, we propose two approaches which are combined to create IDAAC: Invariant Decoupled Advantage Actor-Critic. First, IDAAC decouples the optimization of the policy and value function, using separate networks to model them. Second, it introduces an auxiliary loss which encourages the representation to be invariant to task-irrelevant properties of the environment. IDAAC shows good generalization to unseen environments, achieving a new state-of-the-art on the Procgen benchmark and outperforming popular methods on DeepMind Control tasks with distractors. Our implementation is available at https://github.com/rraileanu/idaac.}\n}", "pdf": "http://proceedings.mlr.press/v139/raileanu21a/raileanu21a.pdf", "supp": "", "pdf_size": 4832370, "gs_citation": 120, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12990450966698605101&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": ";", "aff_domain": ";", "email": ";", "github": "https://github.com/rraileanu/idaac", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/raileanu21a.html" }, { "title": "Deep Adaptive Design: Amortizing Sequential Bayesian Experimental Design", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9523", "id": "9523", "proceeding": "http://proceedings.mlr.press/v139/foster21a.html", "slides": "/media/icml-2021/Slides/9523.pdf", "author_site": "Adam Foster, Desi Ivanova, ILYAS MALIK, Tom Rainforth", "author": "Adam Foster; Desi R Ivanova; Ilyas Malik; Tom Rainforth", "abstract": "We introduce Deep Adaptive Design (DAD), a method for amortizing the cost of adaptive Bayesian experimental design that allows experiments to be run in real-time. Traditional sequential Bayesian optimal experimental design approaches require substantial computation at each stage of the experiment. This makes them unsuitable for most real-world applications, where decisions must typically be made quickly. DAD addresses this restriction by learning an amortized design network upfront and then using this to rapidly run (multiple) adaptive experiments at deployment time. This network represents a design policy which takes as input the data from previous steps, and outputs the next design using a single forward pass; these design decisions can be made in milliseconds during the live experiment. To train the network, we introduce contrastive information bounds that are suitable objectives for the sequential setting, and propose a customized network architecture that exploits key symmetries. 
We demonstrate that DAD successfully amortizes the process of experimental design, outperforming alternative strategies on a number of problems.", "bibtex": "@InProceedings{pmlr-v139-foster21a,\n title = \t {Deep Adaptive Design: Amortizing Sequential Bayesian Experimental Design},\n author = {Foster, Adam and Ivanova, Desi R and Malik, Ilyas and Rainforth, Tom},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3384--3395},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/foster21a/foster21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/foster21a.html},\n abstract = \t {We introduce Deep Adaptive Design (DAD), a method for amortizing the cost of adaptive Bayesian experimental design that allows experiments to be run in real-time. Traditional sequential Bayesian optimal experimental design approaches require substantial computation at each stage of the experiment. This makes them unsuitable for most real-world applications, where decisions must typically be made quickly. DAD addresses this restriction by learning an amortized design network upfront and then using this to rapidly run (multiple) adaptive experiments at deployment time. This network represents a design policy which takes as input the data from previous steps, and outputs the next design using a single forward pass; these design decisions can be made in milliseconds during the live experiment. To train the network, we introduce contrastive information bounds that are suitable objectives for the sequential setting, and propose a customized network architecture that exploits key symmetries. We demonstrate that DAD successfully amortizes the process of experimental design, outperforming alternative strategies on a number of problems.}\n}", "pdf": "http://proceedings.mlr.press/v139/foster21a/foster21a.pdf", "supp": "", "pdf_size": 563169, "gs_citation": 111, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8507220836791345595&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of Statistics, University of Oxford; Department of Statistics, University of Oxford; Work undertaken whilst at the University of Oxford; Department of Statistics, University of Oxford", "aff_domain": "stats.ox.ac.uk; ; ; ", "email": "stats.ox.ac.uk; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/foster21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Oxford", "aff_unique_dep": "Department of Statistics", "aff_unique_url": "https://www.ox.ac.uk", "aff_unique_abbr": "Oxford", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Oxford;", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Deep Coherent Exploration for Continuous Control", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9225", "id": "9225", "proceeding": "http://proceedings.mlr.press/v139/zhang21t.html", "slides": "/media/icml-2021/Slides/9225.pdf", "author_site": "Yijie Zhang, Herke van Hoof", "author": "Yijie Zhang; Herke Van Hoof", "abstract": "In policy search methods for reinforcement learning (RL), exploration is often performed by injecting noise either in action space at each step independently or in parameter space over each full trajectory. 
In prior work, it has been shown that with linear policies, a more balanced trade-off between these two exploration strategies is beneficial. However, that method did not scale to policies using deep neural networks. In this paper, we introduce deep coherent exploration, a general and scalable exploration framework for deep RL algorithms for continuous control, that generalizes step-based and trajectory-based exploration. This framework models the last layer parameters of the policy network as latent variables and uses a recursive inference step within the policy update to handle these latent variables in a scalable manner. We find that deep coherent exploration improves the speed and stability of learning of A2C, PPO, and SAC on several continuous control tasks.", "bibtex": "@InProceedings{pmlr-v139-zhang21t,\n title = \t {Deep Coherent Exploration for Continuous Control},\n author = {Zhang, Yijie and Van Hoof, Herke},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12567--12577},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21t/zhang21t.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21t.html},\n abstract = \t {In policy search methods for reinforcement learning (RL), exploration is often performed by injecting noise either in action space at each step independently or in parameter space over each full trajectory. In prior work, it has been shown that with linear policies, a more balanced trade-off between these two exploration strategies is beneficial. However, that method did not scale to policies using deep neural networks. In this paper, we introduce deep coherent exploration, a general and scalable exploration framework for deep RL algorithms for continuous control, that generalizes step-based and trajectory-based exploration. This framework models the last layer parameters of the policy network as latent variables and uses a recursive inference step within the policy update to handle these latent variables in a scalable manner. 
We find that deep coherent exploration improves the speed and stability of learning of A2C, PPO, and SAC on several continuous control tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21t/zhang21t.pdf", "supp": "", "pdf_size": 8296764, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3533811578867306615&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 6, "aff": "University of Copenhagen, Copenhagen, Denmark (work done while YZ was a master student at the University of Amsterdam); University of Amsterdam, Amsterdam, the Netherlands", "aff_domain": "di.ku.dk; ", "email": "di.ku.dk; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/zhang21t.html", "aff_unique_index": "0;1", "aff_unique_norm": "University of Copenhagen;University of Amsterdam", "aff_unique_dep": ";", "aff_unique_url": "https://www.ku.dk;https://www.uva.nl", "aff_unique_abbr": "UCPH;UvA", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Copenhagen;Amsterdam", "aff_country_unique_index": "0;1", "aff_country_unique": "Denmark;Netherlands" }, { "title": "Deep Continuous Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9169", "id": "9169", "proceeding": "http://proceedings.mlr.press/v139/tomen21a.html", "slides": "/media/icml-2021/Slides/9169.pdf", "author_site": "Nergis Tomen, Silvia-Laura Pintea, Jan van Gemert", "author": "Nergis Tomen; Silvia-Laura Pintea; Jan Van Gemert", "abstract": "CNNs and computational models of biological vision share some fundamental principles, which opened new avenues of research. However, fruitful cross-field research is hampered by conventional CNN architectures being based on spatially and depthwise discrete representations, which cannot accommodate certain aspects of biological complexity such as continuously varying receptive field sizes and dynamics of neuronal responses. Here we propose deep continuous networks (DCNs), which combine spatially continuous filters, with the continuous depth framework of neural ODEs. This allows us to learn the spatial support of the filters during training, as well as model the continuous evolution of feature maps, linking DCNs closely to biological models. We show that DCNs are versatile and highly applicable to standard image classification and reconstruction problems, where they improve parameter and data efficiency, and allow for meta-parametrization. We illustrate the biological plausibility of the scale distributions learned by DCNs and explore their performance in a neuroscientifically inspired pattern completion task. Finally, we investigate an efficient implementation of DCNs by changing input contrast.", "bibtex": "@InProceedings{pmlr-v139-tomen21a,\n title = \t {Deep Continuous Networks},\n author = {Tomen, Nergis and Pintea, Silvia-Laura and Van Gemert, Jan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10324--10335},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tomen21a/tomen21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/tomen21a.html},\n abstract = \t {CNNs and computational models of biological vision share some fundamental principles, which opened new avenues of research. 
However, fruitful cross-field research is hampered by conventional CNN architectures being based on spatially and depthwise discrete representations, which cannot accommodate certain aspects of biological complexity such as continuously varying receptive field sizes and dynamics of neuronal responses. Here we propose deep continuous networks (DCNs), which combine spatially continuous filters, with the continuous depth framework of neural ODEs. This allows us to learn the spatial support of the filters during training, as well as model the continuous evolution of feature maps, linking DCNs closely to biological models. We show that DCNs are versatile and highly applicable to standard image classification and reconstruction problems, where they improve parameter and data efficiency, and allow for meta-parametrization. We illustrate the biological plausibility of the scale distributions learned by DCNs and explore their performance in a neuroscientifically inspired pattern completion task. Finally, we investigate an efficient implementation of DCNs by changing input contrast.}\n}", "pdf": "http://proceedings.mlr.press/v139/tomen21a/tomen21a.pdf", "supp": "", "pdf_size": 4444709, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2388810081875178780&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Computer Vision Lab, Delft University of Technology, Delft, Netherlands; Computer Vision Lab, Delft University of Technology, Delft, Netherlands; Computer Vision Lab, Delft University of Technology, Delft, Netherlands", "aff_domain": "tudelft.nl; ; ", "email": "tudelft.nl; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/tomen21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Delft University of Technology", "aff_unique_dep": "Computer Vision Lab", "aff_unique_url": "https://www.tudelft.nl", "aff_unique_abbr": "TUDelft", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Delft", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Netherlands" }, { "title": "Deep Generative Learning via Schr\u00f6dinger Bridge", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8715", "id": "8715", "proceeding": "http://proceedings.mlr.press/v139/wang21l.html", "slides": "", "author_site": "Gefei Wang, Yuling Jiao, Qian Xu, Yang Wang, Can Yang", "author": "Gefei Wang; Yuling Jiao; Qian Xu; Yang Wang; Can Yang", "abstract": "We propose to learn a generative model via entropy interpolation with a Schr{\u00f6}dinger Bridge. The generative learning task can be formulated as interpolating between a reference distribution and a target distribution based on the Kullback-Leibler divergence. At the population level, this entropy interpolation is characterized via an SDE on [0,1] with a time-varying drift term. At the sample level, we derive our Schr{\u00f6}dinger Bridge algorithm by plugging the drift term estimated by a deep score estimator and a deep density ratio estimator into the Euler-Maruyama method. Under some mild smoothness assumptions of the target distribution, we prove the consistency of both the score estimator and the density ratio estimator, and then establish the consistency of the proposed Schr{\u00f6}dinger Bridge approach. Our theoretical results guarantee that the distribution learned by our approach converges to the target distribution. 
Experimental results on multimodal synthetic data and benchmark data support our theoretical findings and indicate that the generative model via Schr{\u00f6}dinger Bridge is comparable with state-of-the-art GANs, suggesting a new formulation of generative learning. We demonstrate its usefulness in image interpolation and image inpainting.", "bibtex": "@InProceedings{pmlr-v139-wang21l,\n title = \t {Deep Generative Learning via Schr{\u00f6}dinger Bridge},\n author = {Wang, Gefei and Jiao, Yuling and Xu, Qian and Wang, Yang and Yang, Can},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10794--10804},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21l/wang21l.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21l.html},\n abstract = \t {We propose to learn a generative model via entropy interpolation with a Schr{\u00f6}dinger Bridge. The generative learning task can be formulated as interpolating between a reference distribution and a target distribution based on the Kullback-Leibler divergence. At the population level, this entropy interpolation is characterized via an SDE on [0,1] with a time-varying drift term. At the sample level, we derive our Schr{\u00f6}dinger Bridge algorithm by plugging the drift term estimated by a deep score estimator and a deep density ratio estimator into the Euler-Maruyama method. Under some mild smoothness assumptions of the target distribution, we prove the consistency of both the score estimator and the density ratio estimator, and then establish the consistency of the proposed Schr{\u00f6}dinger Bridge approach. Our theoretical results guarantee that the distribution learned by our approach converges to the target distribution. Experimental results on multimodal synthetic data and benchmark data support our theoretical findings and indicate that the generative model via Schr{\u00f6}dinger Bridge is comparable with state-of-the-art GANs, suggesting a new formulation of generative learning. We demonstrate its usefulness in image interpolation and image inpainting.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21l/wang21l.pdf", "supp": "", "pdf_size": 4683493, "gs_citation": 130, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6809208225611743040&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": ";;;;", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/wang21l.html" }, { "title": "Deep Latent Graph Matching", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8835", "id": "8835", "proceeding": "http://proceedings.mlr.press/v139/yu21d.html", "slides": "", "author_site": "Tianshu Yu, Runzhong Wang, Junchi Yan, baoxin Li", "author": "Tianshu Yu; Runzhong Wang; Junchi Yan; Baoxin Li", "abstract": "Deep learning for graph matching (GM) has emerged as an important research topic due to its superior performance over traditional methods and insights it provides for solving other combinatorial problems on graph. 
While recent deep methods for GM extensively investigated effective node/edge feature learning or downstream GM solvers given such learned features, there is little existing work questioning if the fixed connectivity/topology typically constructed using heuristics (e.g., Delaunay or k-nearest) is indeed suitable for GM. From a learning perspective, we argue that the fixed topology may restrict the model capacity and thus potentially hinder the performance. To address this, we propose to learn the (distribution of) latent topology, which can better support the downstream GM task. We devise two latent graph generation procedures, one deterministic and one generative. Particularly, the generative procedure emphasizes the across-graph consistency and thus can be viewed as a matching-guided co-generative model. Our methods deliver superior performance over previous state-of-the-arts on public benchmarks, hence supporting our hypothesis.", "bibtex": "@InProceedings{pmlr-v139-yu21d,\n title = \t {Deep Latent Graph Matching},\n author = {Yu, Tianshu and Wang, Runzhong and Yan, Junchi and Li, Baoxin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12187--12197},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yu21d/yu21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/yu21d.html},\n abstract = \t {Deep learning for graph matching (GM) has emerged as an important research topic due to its superior performance over traditional methods and insights it provides for solving other combinatorial problems on graph. While recent deep methods for GM extensively investigated effective node/edge feature learning or downstream GM solvers given such learned features, there is little existing work questioning if the fixed connectivity/topology typically constructed using heuristics (e.g., Delaunay or k-nearest) is indeed suitable for GM. From a learning perspective, we argue that the fixed topology may restrict the model capacity and thus potentially hinder the performance. To address this, we propose to learn the (distribution of) latent topology, which can better support the downstream GM task. We devise two latent graph generation procedures, one deterministic and one generative. Particularly, the generative procedure emphasizes the across-graph consistency and thus can be viewed as a matching-guided co-generative model. 
Our methods deliver superior performance over previous state-of-the-arts on public benchmarks, hence supporting our hypothesis.}\n}", "pdf": "http://proceedings.mlr.press/v139/yu21d/yu21d.pdf", "supp": "", "pdf_size": 1153569, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11034230794267255922&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Arizona State University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Arizona State University", "aff_domain": "asu.edu; ; ;asu.edu", "email": "asu.edu; ; ;asu.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/yu21d.html", "aff_unique_index": "0;1;1;0", "aff_unique_norm": "Arizona State University;Shanghai Jiao Tong University", "aff_unique_dep": ";", "aff_unique_url": "https://www.asu.edu;https://www.sjtu.edu.cn", "aff_unique_abbr": "ASU;SJTU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;1;0", "aff_country_unique": "United States;China" }, { "title": "Deep Learning for Functional Data Analysis with Adaptive Basis Layers", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9717", "id": "9717", "proceeding": "http://proceedings.mlr.press/v139/yao21c.html", "slides": "/media/icml-2021/Slides/9717.pdf", "author_site": "Junwen Yao, Jonas Mueller, Jane-Ling Wang", "author": "Junwen Yao; Jonas Mueller; Jane-Ling Wang", "abstract": "Despite their widespread success, the application of deep neural networks to functional data remains scarce today. The infinite dimensionality of functional data means standard learning algorithms can be applied only after appropriate dimension reduction, typically achieved via basis expansions. Currently, these bases are chosen a priori without the information for the task at hand and thus may not be effective for the designated task. We instead propose to adaptively learn these bases in an end-to-end fashion. We introduce neural networks that employ a new Basis Layer whose hidden units are each basis functions themselves implemented as a micro neural network. Our architecture learns to apply parsimonious dimension reduction to functional inputs that focuses only on information relevant to the target rather than irrelevant variation in the input function. Across numerous classification/regression tasks with functional data, our method empirically outperforms other types of neural networks, and we prove that our approach is statistically consistent with low generalization error.", "bibtex": "@InProceedings{pmlr-v139-yao21c,\n title = \t {Deep Learning for Functional Data Analysis with Adaptive Basis Layers},\n author = {Yao, Junwen and Mueller, Jonas and Wang, Jane-Ling},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11898--11908},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yao21c/yao21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/yao21c.html},\n abstract = \t {Despite their widespread success, the application of deep neural networks to functional data remains scarce today. The infinite dimensionality of functional data means standard learning algorithms can be applied only after appropriate dimension reduction, typically achieved via basis expansions. 
Currently, these bases are chosen a priori without the information for the task at hand and thus may not be effective for the designated task. We instead propose to adaptively learn these bases in an end-to-end fashion. We introduce neural networks that employ a new Basis Layer whose hidden units are each basis functions themselves implemented as a micro neural network. Our architecture learns to apply parsimonious dimension reduction to functional inputs that focuses only on information relevant to the target rather than irrelevant variation in the input function. Across numerous classification/regression tasks with functional data, our method empirically outperforms other types of neural networks, and we prove that our approach is statistically consistent with low generalization error.}\n}", "pdf": "http://proceedings.mlr.press/v139/yao21c/yao21c.pdf", "supp": "", "pdf_size": 618598, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17144943362411304273&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "UC Davis; Amazon (work done prior to joining Amazon); UC Davis", "aff_domain": "ucdavis.edu; ; ", "email": "ucdavis.edu; ; ", "github": "https://github.com/jwyyy/AdaFNN", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/yao21c.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of California, Davis;Amazon", "aff_unique_dep": ";Amazon.com, Inc.", "aff_unique_url": "https://www.ucdavis.edu;https://www.amazon.com", "aff_unique_abbr": "UC Davis;Amazon", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Davis;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Deep Reinforcement Learning amidst Continual Structured Non-Stationarity", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10467", "id": "10467", "proceeding": "http://proceedings.mlr.press/v139/xie21c.html", "slides": "", "author_site": "Annie Xie, James Harrison, Chelsea Finn", "author": "Annie Xie; James Harrison; Chelsea Finn", "abstract": "As humans, our goals and our environment are persistently changing throughout our lifetime based on our experiences, actions, and internal and external drives. In contrast, typical reinforcement learning problem set-ups consider decision processes that are stationary across episodes. Can we develop reinforcement learning algorithms that can cope with the persistent change in the former, more realistic problem settings? While on-policy algorithms such as policy gradients in principle can be extended to non-stationary settings, the same cannot be said for more efficient off-policy algorithms that replay past experiences when learning. In this work, we formalize this problem setting, and draw upon ideas from the online learning and probabilistic inference literature to derive an off-policy RL algorithm that can reason about and tackle such lifelong non-stationarity. Our method leverages latent variable models to learn a representation of the environment from current and past experiences, and performs off-policy RL with this representation. 
We further introduce several simulation environments that exhibit lifelong non-stationarity, and empirically find that our approach substantially outperforms approaches that do not reason about environment shift.", "bibtex": "@InProceedings{pmlr-v139-xie21c,\n title = \t {Deep Reinforcement Learning amidst Continual Structured Non-Stationarity},\n author = {Xie, Annie and Harrison, James and Finn, Chelsea},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11393--11403},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xie21c/xie21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/xie21c.html},\n abstract = \t {As humans, our goals and our environment are persistently changing throughout our lifetime based on our experiences, actions, and internal and external drives. In contrast, typical reinforcement learning problem set-ups consider decision processes that are stationary across episodes. Can we develop reinforcement learning algorithms that can cope with the persistent change in the former, more realistic problem settings? While on-policy algorithms such as policy gradients in principle can be extended to non-stationary settings, the same cannot be said for more efficient off-policy algorithms that replay past experiences when learning. In this work, we formalize this problem setting, and draw upon ideas from the online learning and probabilistic inference literature to derive an off-policy RL algorithm that can reason about and tackle such lifelong non-stationarity. Our method leverages latent variable models to learn a representation of the environment from current and past experiences, and performs off-policy RL with this representation. We further introduce several simulation environments that exhibit lifelong non-stationarity, and empirically find that our approach substantially outperforms approaches that do not reason about environment shift.}\n}", "pdf": "http://proceedings.mlr.press/v139/xie21c/xie21c.pdf", "supp": "", "pdf_size": 1882905, "gs_citation": 47, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3565415669382979221&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "Stanford University; Stanford University; Stanford University", "aff_domain": "stanford.edu; ; ", "email": "stanford.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/xie21c.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Deep kernel processes", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9135", "id": "9135", "proceeding": "http://proceedings.mlr.press/v139/aitchison21a.html", "slides": "", "author_site": "Laurence Aitchison, Adam Yang, Sebastian Ober", "author": "Laurence Aitchison; Adam Yang; Sebastian W. Ober", "abstract": "We define deep kernel processes in which positive definite Gram matrices are progressively transformed by nonlinear kernel functions and by sampling from (inverse) Wishart distributions. 
Remarkably, we find that deep Gaussian processes (DGPs), Bayesian neural networks (BNNs), infinite BNNs, and infinite BNNs with bottlenecks can all be written as deep kernel processes. For DGPs the equivalence arises because the Gram matrix formed by the inner product of features is Wishart distributed, and as we show, standard isotropic kernels can be written entirely in terms of this Gram matrix \u2014 we do not need knowledge of the underlying features. We define a tractable deep kernel process, the deep inverse Wishart process, and give a doubly-stochastic inducing-point variational inference scheme that operates on the Gram matrices, not on the features, as in DGPs. We show that the deep inverse Wishart process gives superior performance to DGPs and infinite BNNs on fully-connected baselines.", "bibtex": "@InProceedings{pmlr-v139-aitchison21a,\n title = \t {Deep Kernel Processes},\n author = {Aitchison, Laurence and Yang, Adam and Ober, Sebastian W},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {130--140},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/aitchison21a/aitchison21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/aitchison21a.html},\n abstract = \t {We define deep kernel processes in which positive definite Gram matrices are progressively transformed by nonlinear kernel functions and by sampling from (inverse) Wishart distributions. Remarkably, we find that deep Gaussian processes (DGPs), Bayesian neural networks (BNNs), infinite BNNs, and infinite BNNs with bottlenecks can all be written as deep kernel processes. For DGPs the equivalence arises because the Gram matrix formed by the inner product of features is Wishart distributed, and as we show, standard isotropic kernels can be written entirely in terms of this Gram matrix \u2014 we do not need knowledge of the underlying features. We define a tractable deep kernel process, the deep inverse Wishart process, and give a doubly-stochastic inducing-point variational inference scheme that operates on the Gram matrices, not on the features, as in DGPs. 
We show that the deep inverse Wishart process gives superior performance to DGPs and infinite BNNs on fully-connected baselines.}\n}", "pdf": "http://proceedings.mlr.press/v139/aitchison21a/aitchison21a.pdf", "supp": "", "pdf_size": 222334, "gs_citation": 55, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7903149479727507202&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, Bristol, BS8 1UB, UK; Department of Computer Science, Bristol, BS8 1UB, UK; Department of Engineering, Cambridge, CB2 1PZ, UK", "aff_domain": "gmail.com; ; ", "email": "gmail.com; ; ", "github": "github.com/LaurenceA/bayesfunc", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/aitchison21a.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of Bristol;University of Cambridge", "aff_unique_dep": "Department of Computer Science;Department of Engineering", "aff_unique_url": "https://www.bristol.ac.uk;https://www.cam.ac.uk", "aff_unique_abbr": "UoB;Cambridge", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Bristol;Cambridge", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "DeepReDuce: ReLU Reduction for Fast Private Inference", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9709", "id": "9709", "proceeding": "http://proceedings.mlr.press/v139/jha21a.html", "slides": "/media/icml-2021/Slides/9709.pdf", "author_site": "Nandan Kumar Jha, Zahra Ghodsi, Siddharth Garg, Brandon Reagen", "author": "Nandan Kumar Jha; Zahra Ghodsi; Siddharth Garg; Brandon Reagen", "abstract": "The recent rise of privacy concerns has led researchers to devise methods for private neural inference\u2014where inferences are made directly on encrypted data, never seeing inputs. The primary challenge facing private inference is that computing on encrypted data levies an impractically-high latency penalty, stemming mostly from non-linear operators like ReLU. Enabling practical and private inference requires new optimization methods that minimize network ReLU counts while preserving accuracy. This paper proposes DeepReDuce: a set of optimizations for the judicious removal of ReLUs to reduce private inference latency. The key insight is that not all ReLUs contribute equally to accuracy. We leverage this insight to drop, or remove, ReLUs from classic networks to significantly reduce inference latency and maintain high accuracy. Given a network architecture, DeepReDuce outputs a Pareto frontier of networks that tradeoff the number of ReLUs and accuracy. 
Compared to the state-of-the-art for private inference DeepReDuce improves accuracy and reduces ReLU count by up to 3.5% (iso-ReLU count) and 3.5x (iso-accuracy), respectively.", "bibtex": "@InProceedings{pmlr-v139-jha21a,\n title = \t {DeepReDuce: ReLU Reduction for Fast Private Inference},\n author = {Jha, Nandan Kumar and Ghodsi, Zahra and Garg, Siddharth and Reagen, Brandon},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4839--4849},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jha21a/jha21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jha21a.html},\n abstract = \t {The recent rise of privacy concerns has led researchers to devise methods for private neural inference\u2014where inferences are made directly on encrypted data, never seeing inputs. The primary challenge facing private inference is that computing on encrypted data levies an impractically-high latency penalty, stemming mostly from non-linear operators like ReLU. Enabling practical and private inference requires new optimization methods that minimize network ReLU counts while preserving accuracy. This paper proposes DeepReDuce: a set of optimizations for the judicious removal of ReLUs to reduce private inference latency. The key insight is that not all ReLUs contribute equally to accuracy. We leverage this insight to drop, or remove, ReLUs from classic networks to significantly reduce inference latency and maintain high accuracy. Given a network architecture, DeepReDuce outputs a Pareto frontier of networks that tradeoff the number of ReLUs and accuracy. Compared to the state-of-the-art for private inference DeepReDuce improves accuracy and reduces ReLU count by up to 3.5% (iso-ReLU count) and 3.5x (iso-accuracy), respectively.}\n}", "pdf": "http://proceedings.mlr.press/v139/jha21a/jha21a.pdf", "supp": "", "pdf_size": 425594, "gs_citation": 114, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11088238167218760023&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "New York University, New York, USA; New York University, New York, USA; New York University, New York, USA; New York University, New York, USA", "aff_domain": "nyu.edu; ; ; ", "email": "nyu.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/jha21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "New York University", "aff_unique_dep": "", "aff_unique_url": "https://www.nyu.edu", "aff_unique_abbr": "NYU", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "New York", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "DeepWalking Backwards: From Embeddings Back to Graphs", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9715", "id": "9715", "proceeding": "http://proceedings.mlr.press/v139/chanpuriya21a.html", "slides": "", "author_site": "Sudhanshu Chanpuriya, Cameron Musco, Konstantinos Sotiropoulos, Charalampos Tsourakakis", "author": "Sudhanshu Chanpuriya; Cameron Musco; Konstantinos Sotiropoulos; Charalampos Tsourakakis", "abstract": "Low-dimensional node embeddings play a key role in analyzing graph datasets. 
However, little work studies exactly what information is encoded by popular embedding methods, and how this information correlates with performance in downstream learning tasks. We tackle this question by studying whether embeddings can be inverted to (approximately) recover the graph used to generate them. Focusing on a variant of the popular DeepWalk method \\cite{PerozziAl-RfouSkiena:2014, QiuDongMa:2018}, we present algorithms for accurate embedding inversion \u2013 i.e., from the low-dimensional embedding of a graph $G$, we can find a graph $\\tilde G$ with a very similar embedding. We perform numerous experiments on real-world networks, observing that significant information about $G$, such as specific edges and bulk properties like triangle density, is often lost in $\\tilde G$. However, community structure is often preserved or even enhanced. Our findings are a step towards a more rigorous understanding of exactly what information embeddings encode about the input graph, and why this information is useful for learning tasks.", "bibtex": "@InProceedings{pmlr-v139-chanpuriya21a,\n title = \t {DeepWalking Backwards: From Embeddings Back to Graphs},\n author = {Chanpuriya, Sudhanshu and Musco, Cameron and Sotiropoulos, Konstantinos and Tsourakakis, Charalampos},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1473--1483},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chanpuriya21a/chanpuriya21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/chanpuriya21a.html},\n abstract = \t {Low-dimensional node embeddings play a key role in analyzing graph datasets. However, little work studies exactly what information is encoded by popular embedding methods, and how this information correlates with performance in downstream learning tasks. We tackle this question by studying whether embeddings can be inverted to (approximately) recover the graph used to generate them. Focusing on a variant of the popular DeepWalk method \\cite{PerozziAl-RfouSkiena:2014, QiuDongMa:2018}, we present algorithms for accurate embedding inversion \u2013 i.e., from the low-dimensional embedding of a graph $G$, we can find a graph $\\tilde G$ with a very similar embedding. We perform numerous experiments on real-world networks, observing that significant information about $G$, such as specific edges and bulk properties like triangle density, is often lost in $\\tilde G$. However, community structure is often preserved or even enhanced. 
Our findings are a step towards a more rigorous understanding of exactly what information embeddings encode about the input graph, and why this information is useful for learning tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/chanpuriya21a/chanpuriya21a.pdf", "supp": "", "pdf_size": 1049447, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=367308941848540342&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "College of Information and Computer Sciences, University of Massachusetts Amherst, Amherst, MA, USA; College of Information and Computer Sciences, University of Massachusetts Amherst, Amherst, MA, USA; Department of Computer Science, Boston University, Boston, MA, USA; Department of Computer Science, Boston University, Boston, MA, USA + ISI Foundation, Turin, Italy", "aff_domain": "umass; ; ; ", "email": "umass; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/chanpuriya21a.html", "aff_unique_index": "0;0;1;1+2", "aff_unique_norm": "University of Massachusetts Amherst;Boston University;ISI Foundation", "aff_unique_dep": "College of Information and Computer Sciences;Department of Computer Science;", "aff_unique_url": "https://www.umass.edu;https://www.bu.edu;https://www.isifoundation.it", "aff_unique_abbr": "UMass Amherst;BU;", "aff_campus_unique_index": "0;0;1;1+2", "aff_campus_unique": "Amherst;Boston;Turin", "aff_country_unique_index": "0;0;0;0+1", "aff_country_unique": "United States;Italy" }, { "title": "Deeply-Debiased Off-Policy Interval Estimation", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10481", "id": "10481", "proceeding": "http://proceedings.mlr.press/v139/shi21d.html", "slides": "", "author_site": "Chengchun Shi, Runzhe Wan, Victor Chernozhukov, Rui Song", "author": "Chengchun Shi; Runzhe Wan; Victor Chernozhukov; Rui Song", "abstract": "Off-policy evaluation learns a target policy\u2019s value with a historical dataset generated by a different behavior policy. In addition to a point estimate, many applications would benefit significantly from having a confidence interval (CI) that quantifies the uncertainty of the point estimate. In this paper, we propose a novel procedure to construct an efficient, robust, and flexible CI on a target policy\u2019s value. Our method is justified by theoretical results and numerical experiments. A Python implementation of the proposed procedure is available at https://github.com/ RunzheStat/D2OPE.", "bibtex": "@InProceedings{pmlr-v139-shi21d,\n title = \t {Deeply-Debiased Off-Policy Interval Estimation},\n author = {Shi, Chengchun and Wan, Runzhe and Chernozhukov, Victor and Song, Rui},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9580--9591},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/shi21d/shi21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/shi21d.html},\n abstract = \t {Off-policy evaluation learns a target policy\u2019s value with a historical dataset generated by a different behavior policy. In addition to a point estimate, many applications would benefit significantly from having a confidence interval (CI) that quantifies the uncertainty of the point estimate. 
In this paper, we propose a novel procedure to construct an efficient, robust, and flexible CI on a target policy\u2019s value. Our method is justified by theoretical results and numerical experiments. A Python implementation of the proposed procedure is available at https://github.com/ RunzheStat/D2OPE.}\n}", "pdf": "http://proceedings.mlr.press/v139/shi21d/shi21d.pdf", "supp": "", "pdf_size": 486457, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16793961424384021624&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Statistics, London School of Economics and Political Science, London, United Kingdom; Department of Statistics, North Carolina State University, Raleigh, USA; Department of Economics, Massachusetts Institute of Technology, Cambridge, USA; Department of Statistics, North Carolina State University, Raleigh, USA", "aff_domain": "ncsu.edu; ; ;ncsu.edu", "email": "ncsu.edu; ; ;ncsu.edu", "github": "https://github.com/RunzheStat/D2OPE", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/shi21d.html", "aff_unique_index": "0;1;2;1", "aff_unique_norm": "London School of Economics and Political Science;North Carolina State University;Massachusetts Institute of Technology", "aff_unique_dep": "Department of Statistics;Department of Statistics;Department of Economics", "aff_unique_url": "https://www.lse.ac.uk;https://www.ncsu.edu;https://web.mit.edu", "aff_unique_abbr": "LSE;NCSU;MIT", "aff_campus_unique_index": "0;1;2;1", "aff_campus_unique": "London;Raleigh;Cambridge", "aff_country_unique_index": "0;1;1;1", "aff_country_unique": "United Kingdom;United States" }, { "title": "Defense against backdoor attacks via robust covariance estimation", "author": "Jonathan Hayase, Weihao Kong, Raghav Somani, Sewoong Oh", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9013", "id": "9013", "proceeding": "http://proceedings.mlr.press/v139/hayase21a.html", "slides": "" }, { "title": "Delving into Deep Imbalanced Regression", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9493", "id": "9493", "proceeding": "http://proceedings.mlr.press/v139/yang21m.html", "slides": "/media/icml-2021/Slides/9493.pdf", "author_site": "Yuzhe Yang, Kaiwen Zha, YINGCONG CHEN, Hao Wang, Dina Katabi", "author": "Yuzhe Yang; Kaiwen Zha; Yingcong Chen; Hao Wang; Dina Katabi", "abstract": "Real-world data often exhibit imbalanced distributions, where certain target values have significantly fewer observations. Existing techniques for dealing with imbalanced data focus on targets with categorical indices, i.e., different classes. However, many tasks involve continuous targets, where hard boundaries between classes do not exist. We define Deep Imbalanced Regression (DIR) as learning from such imbalanced data with continuous targets, dealing with potential missing data for certain target values, and generalizing to the entire target range. Motivated by the intrinsic difference between categorical and continuous label space, we propose distribution smoothing for both labels and features, which explicitly acknowledges the effects of nearby targets, and calibrates both label and learned feature distributions. We curate and benchmark large-scale DIR datasets from common real-world tasks in computer vision, natural language processing, and healthcare domains. Extensive experiments verify the superior performance of our strategies. 
Our work fills the gap in benchmarks and techniques for practical imbalanced regression problems. Code and data are available at: https://github.com/YyzHarry/imbalanced-regression.", "bibtex": "@InProceedings{pmlr-v139-yang21m,\n title = \t {Delving into Deep Imbalanced Regression},\n author = {Yang, Yuzhe and Zha, Kaiwen and Chen, Yingcong and Wang, Hao and Katabi, Dina},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11842--11851},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yang21m/yang21m.pdf},\n url = \t {https://proceedings.mlr.press/v139/yang21m.html},\n abstract = \t {Real-world data often exhibit imbalanced distributions, where certain target values have significantly fewer observations. Existing techniques for dealing with imbalanced data focus on targets with categorical indices, i.e., different classes. However, many tasks involve continuous targets, where hard boundaries between classes do not exist. We define Deep Imbalanced Regression (DIR) as learning from such imbalanced data with continuous targets, dealing with potential missing data for certain target values, and generalizing to the entire target range. Motivated by the intrinsic difference between categorical and continuous label space, we propose distribution smoothing for both labels and features, which explicitly acknowledges the effects of nearby targets, and calibrates both label and learned feature distributions. We curate and benchmark large-scale DIR datasets from common real-world tasks in computer vision, natural language processing, and healthcare domains. Extensive experiments verify the superior performance of our strategies. Our work fills the gap in benchmarks and techniques for practical imbalanced regression problems. 
Code and data are available at: https://github.com/YyzHarry/imbalanced-regression.}\n}", "pdf": "http://proceedings.mlr.press/v139/yang21m/yang21m.pdf", "supp": "", "pdf_size": 3482767, "gs_citation": 415, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14041915448985010978&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "MIT Computer Science & Artificial Intelligence Laboratory; MIT Computer Science & Artificial Intelligence Laboratory; MIT Computer Science & Artificial Intelligence Laboratory; Department of Computer Science, Rutgers University; MIT Computer Science & Artificial Intelligence Laboratory", "aff_domain": "mit.edu; ; ; ; ", "email": "mit.edu; ; ; ; ", "github": "https://github.com/YyzHarry/imbalanced-regression", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/yang21m.html", "aff_unique_index": "0;0;0;1;0", "aff_unique_norm": "Massachusetts Institute of Technology;Rutgers University", "aff_unique_dep": "Computer Science & Artificial Intelligence Laboratory;Department of Computer Science", "aff_unique_url": "https://www.csail.mit.edu;https://www.rutgers.edu", "aff_unique_abbr": "MIT CSAIL;Rutgers", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Cambridge;", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Demonstration-Conditioned Reinforcement Learning for Few-Shot Imitation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10007", "id": "10007", "proceeding": "http://proceedings.mlr.press/v139/dance21a.html", "slides": "/media/icml-2021/Slides/10007.pdf", "author_site": "Christopher Dance, Perez Julien, Th\u00e9o Cachet", "author": "Christopher R. Dance; Julien Perez; Th\u00e9o Cachet", "abstract": "In few-shot imitation, an agent is given a few demonstrations of a previously unseen task, and must then successfully perform that task. We propose a novel approach to learning few-shot-imitation agents that we call demonstration-conditioned reinforcement learning (DCRL). Given a training set consisting of demonstrations, reward functions and transition distributions for multiple tasks, the idea is to work with a policy that takes demonstrations as input, and to train this policy to maximize the average of the cumulative reward over the set of training tasks. Relative to previously proposed few-shot imitation methods that use behaviour cloning or infer reward functions from demonstrations, our method has the disadvantage that it requires reward functions at training time. However, DCRL also has several advantages, such as the ability to improve upon suboptimal demonstrations, to operate given state-only demonstrations, and to cope with a domain shift between the demonstrator and the agent. Moreover, we show that DCRL outperforms methods based on behaviour cloning by a large margin, on navigation tasks and on robotic manipulation tasks from the Meta-World benchmark.", "bibtex": "@InProceedings{pmlr-v139-dance21a,\n title = \t {Demonstration-Conditioned Reinforcement Learning for Few-Shot Imitation},\n author = {Dance, Christopher R. 
and Perez, Julien and Cachet, Th{\\'e}o},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2376--2387},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/dance21a/dance21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/dance21a.html},\n abstract = \t {In few-shot imitation, an agent is given a few demonstrations of a previously unseen task, and must then successfully perform that task. We propose a novel approach to learning few-shot-imitation agents that we call demonstration-conditioned reinforcement learning (DCRL). Given a training set consisting of demonstrations, reward functions and transition distributions for multiple tasks, the idea is to work with a policy that takes demonstrations as input, and to train this policy to maximize the average of the cumulative reward over the set of training tasks. Relative to previously proposed few-shot imitation methods that use behaviour cloning or infer reward functions from demonstrations, our method has the disadvantage that it requires reward functions at training time. However, DCRL also has several advantages, such as the ability to improve upon suboptimal demonstrations, to operate given state-only demonstrations, and to cope with a domain shift between the demonstrator and the agent. Moreover, we show that DCRL outperforms methods based on behaviour cloning by a large margin, on navigation tasks and on robotic manipulation tasks from the Meta-World benchmark.}\n}", "pdf": "http://proceedings.mlr.press/v139/dance21a/dance21a.pdf", "supp": "", "pdf_size": 904375, "gs_citation": -1, "gs_cited_by_link": "", "gs_version_total": -1, "aff": "NAVER LABS Europe; NAVER LABS Europe; NAVER LABS Europe", "aff_domain": "naverlabs.com; ; ", "email": "naverlabs.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/dance21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "NAVER LABS", "aff_unique_dep": "Europe", "aff_unique_url": "https://www.naverlabs.eu", "aff_unique_abbr": "NAVER LABS", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "France" }, { "title": "Demystifying Inductive Biases for (Beta-)VAE Based Architectures", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9281", "id": "9281", "proceeding": "http://proceedings.mlr.press/v139/zietlow21a.html", "slides": "/media/icml-2021/Slides/9281.pdf", "author_site": "Dominik Zietlow, Michal Rolinek, Georg Martius", "author": "Dominik Zietlow; Michal Rolinek; Georg Martius", "abstract": "The performance of Beta-Variational-Autoencoders and their variants on learning semantically meaningful, disentangled representations is unparalleled. On the other hand, there are theoretical arguments suggesting the impossibility of unsupervised disentanglement. In this work, we shed light on the inductive bias responsible for the success of VAE-based architectures. We show that in classical datasets the structure of variance, induced by the generating factors, is conveniently aligned with the latent directions fostered by the VAE objective. This builds the pivotal bias on which the disentangling abilities of VAEs rely. 
By small, elaborate perturbations of existing datasets, we hide the convenient correlation structure that is easily exploited by a variety of architectures. To demonstrate this, we construct modified versions of standard datasets in which (i) the generative factors are perfectly preserved; (ii) each image undergoes a mild transformation causing a small change of variance; (iii) the leading VAE-based disentanglement architectures fail to produce disentangled representations whilst the performance of a non-variational method remains unchanged.", "bibtex": "@InProceedings{pmlr-v139-zietlow21a,\n title = \t {Demystifying Inductive Biases for (Beta-)VAE Based Architectures},\n author = {Zietlow, Dominik and Rolinek, Michal and Martius, Georg},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12945--12954},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zietlow21a/zietlow21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zietlow21a.html},\n abstract = \t {The performance of Beta-Variational-Autoencoders and their variants on learning semantically meaningful, disentangled representations is unparalleled. On the other hand, there are theoretical arguments suggesting the impossibility of unsupervised disentanglement. In this work, we shed light on the inductive bias responsible for the success of VAE-based architectures. We show that in classical datasets the structure of variance, induced by the generating factors, is conveniently aligned with the latent directions fostered by the VAE objective. This builds the pivotal bias on which the disentangling abilities of VAEs rely. By small, elaborate perturbations of existing datasets, we hide the convenient correlation structure that is easily exploited by a variety of architectures. 
To demonstrate this, we construct modified versions of standard datasets in which (i) the generative factors are perfectly preserved; (ii) each image undergoes a mild transformation causing a small change of variance; (iii) the leading VAE-based disentanglement architectures fail to produce disentangled representations whilst the performance of a non-variational method remains unchanged.}\n}", "pdf": "http://proceedings.mlr.press/v139/zietlow21a/zietlow21a.pdf", "supp": "", "pdf_size": 2153695, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13434932195719282682&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany; Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany; Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany", "aff_domain": "tue.mpg.de; ; ", "email": "tue.mpg.de; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zietlow21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Max Planck Institute for Intelligent Systems", "aff_unique_dep": "", "aff_unique_url": "https://www.mpi-is.mpg.de", "aff_unique_abbr": "MPI-IS", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "T\u00fcbingen", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Germany" }, { "title": "Dense for the Price of Sparse: Improved Performance of Sparsely Initialized Networks via a Subspace Offset", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9247", "id": "9247", "proceeding": "http://proceedings.mlr.press/v139/price21a.html", "slides": "", "author_site": "Ilan Price, Jared Tanner", "author": "Ilan Price; Jared Tanner", "abstract": "That neural networks may be pruned to high sparsities and retain high accuracy is well established. Recent research efforts focus on pruning immediately after initialization so as to allow the computational savings afforded by sparsity to extend to the training process. In this work, we introduce a new \u2018DCT plus Sparse\u2019 layer architecture, which maintains information propagation and trainability even with as little as 0.01% trainable parameters remaining. We show that standard training of networks built with these layers, and pruned at initialization, achieves state-of-the-art accuracy for extreme sparsities on a variety of benchmark network architectures and datasets. Moreover, these results are achieved using only simple heuristics to determine the locations of the trainable parameters in the network, and thus without having to initially store or compute with the full, unpruned network, as is required by competing prune-at-initialization algorithms. 
Switching from standard sparse layers to DCT plus Sparse layers does not increase the storage footprint of a network and incurs only a small additional computational overhead.", "bibtex": "@InProceedings{pmlr-v139-price21a,\n title = \t {Dense for the Price of Sparse: Improved Performance of Sparsely Initialized Networks via a Subspace Offset},\n author = {Price, Ilan and Tanner, Jared},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8620--8629},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/price21a/price21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/price21a.html},\n abstract = \t {That neural networks may be pruned to high sparsities and retain high accuracy is well established. Recent research efforts focus on pruning immediately after initialization so as to allow the computational savings afforded by sparsity to extend to the training process. In this work, we introduce a new \u2018DCT plus Sparse\u2019 layer architecture, which maintains information propagation and trainability even with as little as 0.01% trainable parameters remaining. We show that standard training of networks built with these layers, and pruned at initialization, achieves state-of-the-art accuracy for extreme sparsities on a variety of benchmark network architectures and datasets. Moreover, these results are achieved using only simple heuristics to determine the locations of the trainable parameters in the network, and thus without having to initially store or compute with the full, unpruned network, as is required by competing prune-at-initialization algorithms. 
Switching from standard sparse layers to DCT plus Sparse layers does not increase the storage footprint of a network and incurs only a small additional computational overhead.}\n}", "pdf": "http://proceedings.mlr.press/v139/price21a/price21a.pdf", "supp": "", "pdf_size": 1953157, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17879749331929716913&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Mathematical Institute, University of Oxford, Oxford, UK + The Alan Turing Institute, London, UK; Mathematical Institute, University of Oxford, Oxford, UK + The Alan Turing Institute, London, UK", "aff_domain": "maths.ox.ac.uk; ", "email": "maths.ox.ac.uk; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/price21a.html", "aff_unique_index": "0+1;0+1", "aff_unique_norm": "University of Oxford;Alan Turing Institute", "aff_unique_dep": "Mathematical Institute;", "aff_unique_url": "https://www.ox.ac.uk;https://www.turing.ac.uk", "aff_unique_abbr": "Oxford;ATI", "aff_campus_unique_index": "0+1;0+1", "aff_campus_unique": "Oxford;London", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "United Kingdom" }, { "title": "Density Constrained Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8985", "id": "8985", "proceeding": "http://proceedings.mlr.press/v139/qin21a.html", "slides": "", "author_site": "Zengyi Qin, Yuxiao Chen, Chuchu Fan", "author": "Zengyi Qin; Yuxiao Chen; Chuchu Fan", "abstract": "We study constrained reinforcement learning (CRL) from a novel perspective by setting constraints directly on state density functions, rather than the value functions considered by previous works. State density has a clear physical and mathematical interpretation, and is able to express a wide variety of constraints such as resource limits and safety requirements. Density constraints can also avoid the time-consuming process of designing and tuning cost functions required by value function-based constraints to encode system specifications. We leverage the duality between density functions and Q functions to develop an effective algorithm to solve the density constrained RL problem optimally and the constraints are guaranteed to be satisfied. We prove that the proposed algorithm converges to a near-optimal solution with a bounded error even when the policy update is imperfect. We use a set of comprehensive experiments to demonstrate the advantages of our approach over state-of-the-art CRL methods, with a wide range of density constrained tasks as well as standard CRL benchmarks such as Safety-Gym.", "bibtex": "@InProceedings{pmlr-v139-qin21a,\n title = \t {Density Constrained Reinforcement Learning},\n author = {Qin, Zengyi and Chen, Yuxiao and Fan, Chuchu},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8682--8692},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/qin21a/qin21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/qin21a.html},\n abstract = \t {We study constrained reinforcement learning (CRL) from a novel perspective by setting constraints directly on state density functions, rather than the value functions considered by previous works. 
State density has a clear physical and mathematical interpretation, and is able to express a wide variety of constraints such as resource limits and safety requirements. Density constraints can also avoid the time-consuming process of designing and tuning cost functions required by value function-based constraints to encode system specifications. We leverage the duality between density functions and Q functions to develop an effective algorithm to solve the density constrained RL problem optimally and the constraints are guaranteed to be satisfied. We prove that the proposed algorithm converges to a near-optimal solution with a bounded error even when the policy update is imperfect. We use a set of comprehensive experiments to demonstrate the advantages of our approach over state-of-the-art CRL methods, with a wide range of density constrained tasks as well as standard CRL benchmarks such as Safety-Gym.}\n}", "pdf": "http://proceedings.mlr.press/v139/qin21a/qin21a.pdf", "supp": "", "pdf_size": 2638531, "gs_citation": 41, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10915944181485029356&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Massachusetts Institute of Technology; California Institute of Technology; Massachusetts Institute of Technology", "aff_domain": "mit.edu; ;mit.edu", "email": "mit.edu; ;mit.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/qin21a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Massachusetts Institute of Technology;California Institute of Technology", "aff_unique_dep": ";", "aff_unique_url": "https://web.mit.edu;https://www.caltech.edu", "aff_unique_abbr": "MIT;Caltech", "aff_campus_unique_index": "1", "aff_campus_unique": ";Pasadena", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Descending through a Crowded Valley - Benchmarking Deep Learning Optimizers", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8645", "id": "8645", "proceeding": "http://proceedings.mlr.press/v139/schmidt21a.html", "slides": "/media/icml-2021/Slides/8645.pdf", "author_site": "Robin M Schmidt, Frank Schneider, Philipp Hennig", "author": "Robin M Schmidt; Frank Schneider; Philipp Hennig", "abstract": "Choosing the optimizer is considered to be among the most crucial design decisions in deep learning, and it is not an easy one. The growing literature now lists hundreds of optimization methods. In the absence of clear theoretical guidance and conclusive empirical evidence, the decision is often made based on anecdotes. In this work, we aim to replace these anecdotes, if not with a conclusive ranking, then at least with evidence-backed heuristics. To do so, we perform an extensive, standardized benchmark of fifteen particularly popular deep learning optimizers while giving a concise overview of the wide range of possible choices. Analyzing more than 50,000 individual runs, we contribute the following three points: (i) Optimizer performance varies greatly across tasks. (ii) We observe that evaluating multiple optimizers with default parameters works approximately as well as tuning the hyperparameters of a single, fixed optimizer. 
(iii) While we cannot discern an optimization method clearly dominating across all tested tasks, we identify a significantly reduced subset of specific optimizers and parameter choices that generally lead to competitive results in our experiments: Adam remains a strong contender, with newer methods failing to significantly and consistently outperform it. Our open-sourced results are available as challenging and well-tuned baselines for more meaningful evaluations of novel optimization methods without requiring any further computational efforts.", "bibtex": "@InProceedings{pmlr-v139-schmidt21a,\n title = \t {Descending through a Crowded Valley - Benchmarking Deep Learning Optimizers},\n author = {Schmidt, Robin M and Schneider, Frank and Hennig, Philipp},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9367--9376},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/schmidt21a/schmidt21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/schmidt21a.html},\n abstract = \t {Choosing the optimizer is considered to be among the most crucial design decisions in deep learning, and it is not an easy one. The growing literature now lists hundreds of optimization methods. In the absence of clear theoretical guidance and conclusive empirical evidence, the decision is often made based on anecdotes. In this work, we aim to replace these anecdotes, if not with a conclusive ranking, then at least with evidence-backed heuristics. To do so, we perform an extensive, standardized benchmark of fifteen particularly popular deep learning optimizers while giving a concise overview of the wide range of possible choices. Analyzing more than 50,000 individual runs, we contribute the following three points: (i) Optimizer performance varies greatly across tasks. (ii) We observe that evaluating multiple optimizers with default parameters works approximately as well as tuning the hyperparameters of a single, fixed optimizer. (iii) While we cannot discern an optimization method clearly dominating across all tested tasks, we identify a significantly reduced subset of specific optimizers and parameter choices that generally lead to competitive results in our experiments: Adam remains a strong contender, with newer methods failing to significantly and consistently outperform it. 
Our open-sourced results are available as challenging and well-tuned baselines for more meaningful evaluations of novel optimization methods without requiring any further computational efforts.}\n}", "pdf": "http://proceedings.mlr.press/v139/schmidt21a/schmidt21a.pdf", "supp": "", "pdf_size": 482873, "gs_citation": 232, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13765522216321935825&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 8, "aff": "Methods of Machine Learning, University of T\u00fcbingen, T\u00fcbingen, Germany + Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany; Methods of Machine Learning, University of T\u00fcbingen, T\u00fcbingen, Germany + Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany; Methods of Machine Learning, University of T\u00fcbingen, T\u00fcbingen, Germany + Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany", "aff_domain": "web.de;uni-tuebingen.de; ", "email": "web.de;uni-tuebingen.de; ", "github": "https://github.com/SirRob1997/Crowded-Valley---Results", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/schmidt21a.html", "aff_unique_index": "0+1;0+1;0+1", "aff_unique_norm": "University of T\u00fcbingen;Max Planck Institute for Intelligent Systems", "aff_unique_dep": "Methods of Machine Learning;", "aff_unique_url": "https://www.uni-tuebingen.de;https://www.mpi-is.mpg.de", "aff_unique_abbr": "Uni T\u00fcbingen;MPI-IS", "aff_campus_unique_index": "0+0;0+0;0+0", "aff_campus_unique": "T\u00fcbingen", "aff_country_unique_index": "0+0;0+0;0+0", "aff_country_unique": "Germany" }, { "title": "Detecting Rewards Deterioration in Episodic Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8883", "id": "8883", "proceeding": "http://proceedings.mlr.press/v139/greenberg21a.html", "slides": "/media/icml-2021/Slides/8883.pdf", "author_site": "Ido Greenberg, Shie Mannor", "author": "Ido Greenberg; Shie Mannor", "abstract": "In many RL applications, once training ends, it is vital to detect any deterioration in the agent performance as soon as possible. Furthermore, it often has to be done without modifying the policy and under minimal assumptions regarding the environment. In this paper, we address this problem by focusing directly on the rewards and testing for degradation. We consider an episodic framework, where the rewards within each episode are not independent, nor identically-distributed, nor Markov. We present this problem as a multivariate mean-shift detection problem with possibly partial observations. We define the mean-shift in a way corresponding to deterioration of a temporal signal (such as the rewards), and derive a test for this problem with optimal statistical power. Empirically, on deteriorated rewards in control problems (generated using various environment modifications), the test is demonstrated to be more powerful than standard tests - often by orders of magnitude. We also suggest a novel Bootstrap mechanism for False Alarm Rate control (BFAR), applicable to episodic (non-i.i.d) signal and allowing our test to run sequentially in an online manner. 
Our method does not rely on a learned model of the environment, is entirely external to the agent, and in fact can be applied to detect changes or drifts in any episodic signal.", "bibtex": "@InProceedings{pmlr-v139-greenberg21a,\n title = \t {Detecting Rewards Deterioration in Episodic Reinforcement Learning},\n author = {Greenberg, Ido and Mannor, Shie},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3842--3853},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/greenberg21a/greenberg21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/greenberg21a.html},\n abstract = \t {In many RL applications, once training ends, it is vital to detect any deterioration in the agent performance as soon as possible. Furthermore, it often has to be done without modifying the policy and under minimal assumptions regarding the environment. In this paper, we address this problem by focusing directly on the rewards and testing for degradation. We consider an episodic framework, where the rewards within each episode are not independent, nor identically-distributed, nor Markov. We present this problem as a multivariate mean-shift detection problem with possibly partial observations. We define the mean-shift in a way corresponding to deterioration of a temporal signal (such as the rewards), and derive a test for this problem with optimal statistical power. Empirically, on deteriorated rewards in control problems (generated using various environment modifications), the test is demonstrated to be more powerful than standard tests - often by orders of magnitude. We also suggest a novel Bootstrap mechanism for False Alarm Rate control (BFAR), applicable to episodic (non-i.i.d) signal and allowing our test to run sequentially in an online manner. 
Our method does not rely on a learned model of the environment, is entirely external to the agent, and in fact can be applied to detect changes or drifts in any episodic signal.}\n}", "pdf": "http://proceedings.mlr.press/v139/greenberg21a/greenberg21a.pdf", "supp": "", "pdf_size": 464156, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6107338977661068725&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Department of Electric Engineering, Technion, Israel+Nvidia Research; Department of Electric Engineering, Technion, Israel+Nvidia Research", "aff_domain": "campus.technion.ac.il;ee.technion.ac.il", "email": "campus.technion.ac.il;ee.technion.ac.il", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/greenberg21a.html", "aff_unique_index": "0+1;0+1", "aff_unique_norm": "Technion;NVIDIA", "aff_unique_dep": "Department of Electric Engineering;NVIDIA Research", "aff_unique_url": "https://www.technion.ac.il;https://www.nvidia.com/research", "aff_unique_abbr": "Technion;NVIDIA", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+1;0+1", "aff_country_unique": "Israel;United States" }, { "title": "Detection of Signal in the Spiked Rectangular Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10495", "id": "10495", "proceeding": "http://proceedings.mlr.press/v139/jung21a.html", "slides": "/media/icml-2021/Slides/10495.pdf", "author_site": "Ji Hyung Jung, Hye Won Chung, Ji Oon Lee", "author": "Ji Hyung Jung; Hye Won Chung; Ji Oon Lee", "abstract": "We consider the problem of detecting signals in the rank-one signal-plus-noise data matrix models that generalize the spiked Wishart matrices. We show that the principal component analysis can be improved by pre-transforming the matrix entries if the noise is non-Gaussian. As an intermediate step, we prove a sharp phase transition of the largest eigenvalues of spiked rectangular matrices, which extends the Baik\u2013Ben Arous\u2013P\u00e9ch\u00e9 (BBP) transition. We also propose a hypothesis test to detect the presence of signal with low computational complexity, based on the linear spectral statistics, which minimizes the sum of the Type-I and Type-II errors when the noise is Gaussian.", "bibtex": "@InProceedings{pmlr-v139-jung21a,\n title = \t {Detection of Signal in the Spiked Rectangular Models},\n author = {Jung, Ji Hyung and Chung, Hye Won and Lee, Ji Oon},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5158--5167},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jung21a/jung21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jung21a.html},\n abstract = \t {We consider the problem of detecting signals in the rank-one signal-plus-noise data matrix models that generalize the spiked Wishart matrices. We show that the principal component analysis can be improved by pre-transforming the matrix entries if the noise is non-Gaussian. As an intermediate step, we prove a sharp phase transition of the largest eigenvalues of spiked rectangular matrices, which extends the Baik\u2013Ben Arous\u2013P\u00e9ch\u00e9 (BBP) transition. 
We also propose a hypothesis test to detect the presence of signal with low computational complexity, based on the linear spectral statistics, which minimizes the sum of the Type-I and Type-II errors when the noise is Gaussian.}\n}", "pdf": "http://proceedings.mlr.press/v139/jung21a/jung21a.pdf", "supp": "", "pdf_size": 480879, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9185324071618649350&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Mathematical Sciences, KAIST, Daejeon, Korea; School of Electrical Engineering, KAIST, Daejeon, Korea; School of Mathematics, KIAS, Seoul, Korea + Department of Mathematical Sciences, KAIST, Daejeon, Korea", "aff_domain": "kaist.edu;kaist.ac.kr;kaist.edu", "email": "kaist.edu;kaist.ac.kr;kaist.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/jung21a.html", "aff_unique_index": "0;0;1+0", "aff_unique_norm": "KAIST;KIAS", "aff_unique_dep": "Department of Mathematical Sciences;School of Mathematics", "aff_unique_url": "https://www.kaist.ac.kr;http://www.kias.re.kr", "aff_unique_abbr": "KAIST;KIAS", "aff_campus_unique_index": "0;0;1+0", "aff_campus_unique": "Daejeon;Seoul", "aff_country_unique_index": "0;0;0+0", "aff_country_unique": "South Korea" }, { "title": "Dichotomous Optimistic Search to Quantify Human Perception", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8599", "id": "8599", "proceeding": "http://proceedings.mlr.press/v139/audiffren21a.html", "slides": "", "author": "Julien Audiffren", "abstract": "In this paper we address a variant of the continuous multi-armed bandits problem, called the threshold estimation problem, which is at the heart of many psychometric experiments. Here, the objective is to estimate the sensitivity threshold for an unknown psychometric function Psi, which is assumed to be non-decreasing and continuous. Our algorithm, Dichotomous Optimistic Search (DOS), efficiently solves this task by taking inspiration from hierarchical multi-armed bandits and Black-box optimization. Compared to previous approaches, DOS is model-free and only makes minimal assumptions on Psi smoothness, while having strong theoretical guarantees that compare favorably to recent methods from both Psychophysics and Global Optimization. We also empirically evaluate DOS and show that it significantly outperforms these methods, both in experiments that mimic the conduct of a psychometric experiment, and in tests with large pull budgets that illustrate the faster convergence rate.", "bibtex": "@InProceedings{pmlr-v139-audiffren21a,\n title = \t {Dichotomous Optimistic Search to Quantify Human Perception},\n author = {Audiffren, Julien},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {414--424},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/audiffren21a/audiffren21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/audiffren21a.html},\n abstract = \t {In this paper we address a variant of the continuous multi-armed bandits problem, called the threshold estimation problem, which is at the heart of many psychometric experiments. 
Here, the objective is to estimate the sensitivity threshold for an unknown psychometric function Psi, which is assumed to be non-decreasing and continuous. Our algorithm, Dichotomous Optimistic Search (DOS), efficiently solves this task by taking inspiration from hierarchical multi-armed bandits and Black-box optimization. Compared to previous approaches, DOS is model-free and only makes minimal assumptions on Psi smoothness, while having strong theoretical guarantees that compare favorably to recent methods from both Psychophysics and Global Optimization. We also empirically evaluate DOS and show that it significantly outperforms these methods, both in experiments that mimic the conduct of a psychometric experiment, and in tests with large pull budgets that illustrate the faster convergence rate.}\n}", "pdf": "http://proceedings.mlr.press/v139/audiffren21a/audiffren21a.pdf", "supp": "", "pdf_size": 685701, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1979129185095328735&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "Department of Neuroscience, University of Fribourg, Fribourg, Switzerland", "aff_domain": "unifr.ch", "email": "unifr.ch", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v139/audiffren21a.html", "aff_unique_index": "0", "aff_unique_norm": "University of Fribourg", "aff_unique_dep": "Department of Neuroscience", "aff_unique_url": "https://www.unifr.ch", "aff_unique_abbr": "", "aff_campus_unique_index": "0", "aff_campus_unique": "Fribourg", "aff_country_unique_index": "0", "aff_country_unique": "Switzerland" }, { "title": "Differentiable Dynamic Quantization with Mixed Precision and Adaptive Resolution", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8827", "id": "8827", "proceeding": "http://proceedings.mlr.press/v139/zhang21r.html", "slides": "", "author_site": "zhaoyang zhang, Wenqi Shao, Jinwei Gu, Xiaogang Wang, Ping Luo", "author": "Zhaoyang Zhang; Wenqi Shao; Jinwei Gu; Xiaogang Wang; Ping Luo", "abstract": "Model quantization is challenging due to many tedious hyper-parameters such as precision (bitwidth), dynamic range (minimum and maximum discrete values) and stepsize (interval between discrete values). Unlike prior arts that carefully tune these values, we present a fully differentiable approach to learn all of them, named Differentiable Dynamic Quantization (DDQ), which has several benefits. (1) DDQ is able to quantize challenging lightweight architectures like MobileNets, where different layers prefer different quantization parameters. (2) DDQ is hardware-friendly and can be easily implemented using low-precision matrix-vector multiplication, making it capable in many hardware such as ARM. (3) Extensive experiments show that DDQ outperforms prior arts on many networks and benchmarks, especially when models are already efficient and compact. 
e.g., DDQ is the first approach that achieves lossless 4-bit quantization for MobileNetV2 on ImageNet.", "bibtex": "@InProceedings{pmlr-v139-zhang21r,\n title = \t {Differentiable Dynamic Quantization with Mixed Precision and Adaptive Resolution},\n author = {Zhang, Zhaoyang and Shao, Wenqi and Gu, Jinwei and Wang, Xiaogang and Luo, Ping},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12546--12556},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21r/zhang21r.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21r.html},\n abstract = \t {Model quantization is challenging due to many tedious hyper-parameters such as precision (bitwidth), dynamic range (minimum and maximum discrete values) and stepsize (interval between discrete values). Unlike prior arts that carefully tune these values, we present a fully differentiable approach to learn all of them, named Differentiable Dynamic Quantization (DDQ), which has several benefits. (1) DDQ is able to quantize challenging lightweight architectures like MobileNets, where different layers prefer different quantization parameters. (2) DDQ is hardware-friendly and can be easily implemented using low-precision matrix-vector multiplication, making it capable in many hardware such as ARM. (3) Extensive experiments show that DDQ outperforms prior arts on many networks and benchmarks, especially when models are already efficient and compact. e.g., DDQ is the first approach that achieves lossless 4-bit quantization for MobileNetV2 on ImageNet.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21r/zhang21r.pdf", "supp": "", "pdf_size": 1443119, "gs_citation": 36, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10702319906241740586&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "The Chinese University of Hong Kong; The Chinese University of Hong Kong; SenseBrain, Ltd+Shanghai AI Lab; The Chinese University of Hong Kong; Hong Kong University", "aff_domain": "link.cuhk.edu.hk; ; ; ; ", "email": "link.cuhk.edu.hk; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/zhang21r.html", "aff_unique_index": "0;0;1+2;0;3", "aff_unique_norm": "Chinese University of Hong Kong;SenseBrain;Shanghai AI Lab;Hong Kong University", "aff_unique_dep": ";Ltd;;", "aff_unique_url": "https://www.cuhk.edu.hk;;https://www.shanghaiailab.com;https://www.hku.hk", "aff_unique_abbr": "CUHK;;SAIL;HKU", "aff_campus_unique_index": "0;0;;0;0", "aff_campus_unique": "Hong Kong SAR;", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "China;" }, { "title": "Differentiable Particle Filtering via Entropy-Regularized Optimal Transport", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8557", "id": "8557", "proceeding": "http://proceedings.mlr.press/v139/corenflos21a.html", "slides": "/media/icml-2021/Slides/8557.pdf", "author_site": "Adrien Corenflos, James Thornton, George Deligiannidis, Arnaud Doucet", "author": "Adrien Corenflos; James Thornton; George Deligiannidis; Arnaud Doucet", "abstract": "Particle Filtering (PF) methods are an established class of procedures for performing inference in non-linear state-space models. Resampling is a key ingredient of PF necessary to obtain low variance likelihood and states estimates. 
However, traditional resampling methods result in PF-based loss functions being non-differentiable with respect to model and PF parameters. In a variational inference context, resampling also yields high variance gradient estimates of the PF-based evidence lower bound. By leveraging optimal transport ideas, we introduce a principled differentiable particle filter and provide convergence results. We demonstrate this novel method on a variety of applications.", "bibtex": "@InProceedings{pmlr-v139-corenflos21a,\n title = \t {Differentiable Particle Filtering via Entropy-Regularized Optimal Transport},\n author = {Corenflos, Adrien and Thornton, James and Deligiannidis, George and Doucet, Arnaud},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2100--2111},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/corenflos21a/corenflos21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/corenflos21a.html},\n abstract = \t {Particle Filtering (PF) methods are an established class of procedures for performing inference in non-linear state-space models. Resampling is a key ingredient of PF necessary to obtain low variance likelihood and states estimates. However, traditional resampling methods result in PF-based loss functions being non-differentiable with respect to model and PF parameters. In a variational inference context, resampling also yields high variance gradient estimates of the PF-based evidence lower bound. By leveraging optimal transport ideas, we introduce a principled differentiable particle filter and provide convergence results. 
We demonstrate this novel method on a variety of applications.}\n}", "pdf": "http://proceedings.mlr.press/v139/corenflos21a/corenflos21a.pdf", "supp": "", "pdf_size": 2137331, "gs_citation": 103, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6170897491109878876&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Electrical Engineering and Automation, Aalto University; Department of Statistics, University of Oxford; Department of Statistics, University of Oxford; Department of Statistics, University of Oxford", "aff_domain": "aalto.fi;spc.ox.ac.uk; ; ", "email": "aalto.fi;spc.ox.ac.uk; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/corenflos21a.html", "aff_unique_index": "0;1;1;1", "aff_unique_norm": "Aalto University;University of Oxford", "aff_unique_dep": "Department of Electrical Engineering and Automation;Department of Statistics", "aff_unique_url": "https://www.aalto.fi;https://www.ox.ac.uk", "aff_unique_abbr": "Aalto;Oxford", "aff_campus_unique_index": "1;1;1", "aff_campus_unique": ";Oxford", "aff_country_unique_index": "0;1;1;1", "aff_country_unique": "Finland;United Kingdom" }, { "title": "Differentiable Sorting Networks for Scalable Sorting and Ranking Supervision", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8439", "id": "8439", "proceeding": "http://proceedings.mlr.press/v139/petersen21a.html", "slides": "/media/icml-2021/Slides/8439.pdf", "author_site": "Felix Petersen, Christian Borgelt, Hilde Kuehne, Oliver Deussen", "author": "Felix Petersen; Christian Borgelt; Hilde Kuehne; Oliver Deussen", "abstract": "Sorting and ranking supervision is a method for training neural networks end-to-end based on ordering constraints. That is, the ground truth order of sets of samples is known, while their absolute values remain unsupervised. For that, we propose differentiable sorting networks by relaxing their pairwise conditional swap operations. To address the problems of vanishing gradients and extensive blurring that arise with larger numbers of layers, we propose mapping activations to regions with moderate gradients. We consider odd-even as well as bitonic sorting networks, which outperform existing relaxations of the sorting operation. We show that bitonic sorting networks can achieve stable training on large input sets of up to 1024 elements.", "bibtex": "@InProceedings{pmlr-v139-petersen21a,\n title = \t {Differentiable Sorting Networks for Scalable Sorting and Ranking Supervision},\n author = {Petersen, Felix and Borgelt, Christian and Kuehne, Hilde and Deussen, Oliver},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8546--8555},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/petersen21a/petersen21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/petersen21a.html},\n abstract = \t {Sorting and ranking supervision is a method for training neural networks end-to-end based on ordering constraints. That is, the ground truth order of sets of samples is known, while their absolute values remain unsupervised. For that, we propose differentiable sorting networks by relaxing their pairwise conditional swap operations. 
To address the problems of vanishing gradients and extensive blurring that arise with larger numbers of layers, we propose mapping activations to regions with moderate gradients. We consider odd-even as well as bitonic sorting networks, which outperform existing relaxations of the sorting operation. We show that bitonic sorting networks can achieve stable training on large input sets of up to 1024 elements.}\n}", "pdf": "http://proceedings.mlr.press/v139/petersen21a/petersen21a.pdf", "supp": "", "pdf_size": 318338, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17724555463141900355&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "University of Konstanz, Germany; University of Salzburg, Austria; University of Frankfurt, Germany+MIT-IBM Watson AI Lab; University of Konstanz, Germany", "aff_domain": "uni.kn; ; ; ", "email": "uni.kn; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/petersen21a.html", "aff_unique_index": "0;1;2+3;0", "aff_unique_norm": "University of Konstanz;University of Salzburg;University of Frankfurt;Massachusetts Institute of Technology", "aff_unique_dep": ";;;IBM Watson AI Lab", "aff_unique_url": "https://www.uni-konstanz.de;https://www.uni-salzburg.at;https://www.uni-frankfurt.de;https://www.mitibmwatsonailab.org", "aff_unique_abbr": "Uni Konstanz;USAL;UoF;MIT-IBM AI Lab", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0+2;0", "aff_country_unique": "Germany;Austria;United States" }, { "title": "Differentiable Spatial Planning using Transformers", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9101", "id": "9101", "proceeding": "http://proceedings.mlr.press/v139/chaplot21a.html", "slides": "/media/icml-2021/Slides/9101.pdf", "author_site": "Devendra Singh Chaplot, Deepak Pathak, Jitendra Malik", "author": "Devendra Singh Chaplot; Deepak Pathak; Jitendra Malik", "abstract": "We consider the problem of spatial path planning. In contrast to the classical solutions which optimize a new plan from scratch and assume access to the full map with ground truth obstacle locations, we learn a planner from the data in a differentiable manner that allows us to leverage statistical regularities from past data. We propose Spatial Planning Transformers (SPT), which given an obstacle map learns to generate actions by planning over long-range spatial dependencies, unlike prior data-driven planners that propagate information locally via convolutional structure in an iterative manner. In the setting where the ground truth map is not known to the agent, we leverage pre-trained SPTs in an end-to-end framework that has the structure of mapper and planner built into it which allows seamless generalization to out-of-distribution maps and goals. 
SPTs outperform prior state-of-the-art differentiable planners across all the setups for both manipulation and navigation tasks, leading to an absolute improvement of 7-19%.", "bibtex": "@InProceedings{pmlr-v139-chaplot21a,\n title = \t {Differentiable Spatial Planning using Transformers},\n author = {Chaplot, Devendra Singh and Pathak, Deepak and Malik, Jitendra},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1484--1495},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chaplot21a/chaplot21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/chaplot21a.html},\n abstract = \t {We consider the problem of spatial path planning. In contrast to the classical solutions which optimize a new plan from scratch and assume access to the full map with ground truth obstacle locations, we learn a planner from the data in a differentiable manner that allows us to leverage statistical regularities from past data. We propose Spatial Planning Transformers (SPT), which given an obstacle map learns to generate actions by planning over long-range spatial dependencies, unlike prior data-driven planners that propagate information locally via convolutional structure in an iterative manner. In the setting where the ground truth map is not known to the agent, we leverage pre-trained SPTs in an end-to-end framework that has the structure of mapper and planner built into it which allows seamless generalization to out-of-distribution maps and goals. SPTs outperform prior state-of-the-art differentiable planners across all the setups for both manipulation and navigation tasks, leading to an absolute improvement of 7-19%.}\n}", "pdf": "http://proceedings.mlr.press/v139/chaplot21a/chaplot21a.pdf", "supp": "", "pdf_size": 2429246, "gs_citation": 49, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2713003959163330423&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Facebook AI Research + Carnegie Mellon University; Carnegie Mellon University; UC Berkeley", "aff_domain": "fb.com; ; ", "email": "fb.com; ; ", "github": "", "project": "https://devendrachaplot.github.io/projects/spatial-planning-transformers", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/chaplot21a.html", "aff_unique_index": "0+1;1;2", "aff_unique_norm": "Meta;Carnegie Mellon University;University of California, Berkeley", "aff_unique_dep": "Facebook AI Research;;", "aff_unique_url": "https://research.facebook.com;https://www.cmu.edu;https://www.berkeley.edu", "aff_unique_abbr": "FAIR;CMU;UC Berkeley", "aff_campus_unique_index": ";1", "aff_campus_unique": ";Berkeley", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "United States" }, { "title": "Differentially Private Aggregation in the Shuffle Model: Almost Central Accuracy in Almost a Single Message", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8511", "id": "8511", "proceeding": "http://proceedings.mlr.press/v139/ghazi21a.html", "slides": "/media/icml-2021/Slides/8511_wY80tck.pdf", "author_site": "Badih Ghazi, Ravi Kumar, Pasin Manurangsi, Rasmus Pagh, Amer Sinha", "author": "Badih Ghazi; Ravi Kumar; Pasin Manurangsi; Rasmus Pagh; Amer Sinha", "abstract": "The shuffle model of differential privacy has attracted attention in the literature due to it being a middle 
ground between the well-studied central and local models. In this work, we study the problem of summing (aggregating) real numbers or integers, a basic primitive in numerous machine learning tasks, in the shuffle model. We give a protocol achieving error arbitrarily close to that of the (Discrete) Laplace mechanism in central differential privacy, while each user only sends 1 + o(1) short messages in expectation.", "bibtex": "@InProceedings{pmlr-v139-ghazi21a,\n title = \t {Differentially Private Aggregation in the Shuffle Model: Almost Central Accuracy in Almost a Single Message},\n author = {Ghazi, Badih and Kumar, Ravi and Manurangsi, Pasin and Pagh, Rasmus and Sinha, Amer},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3692--3701},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ghazi21a/ghazi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ghazi21a.html},\n abstract = \t {The shuffle model of differential privacy has attracted attention in the literature due to it being a middle ground between the well-studied central and local models. In this work, we study the problem of summing (aggregating) real numbers or integers, a basic primitive in numerous machine learning tasks, in the shuffle model. We give a protocol achieving error arbitrarily close to that of the (Discrete) Laplace mechanism in central differential privacy, while each user only sends 1 + o(1) short messages in expectation.}\n}", "pdf": "http://proceedings.mlr.press/v139/ghazi21a/ghazi21a.pdf", "supp": "", "pdf_size": 689176, "gs_citation": 44, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=315956302310473364&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Google Research, Mountain View; Google Research, Mountain View; Google Research, Mountain View; University of Copenhagen, Denmark; Google, San Bruno", "aff_domain": "gmail.com;gmail.com;google.com;di.ku.dk;google.com", "email": "gmail.com;gmail.com;google.com;di.ku.dk;google.com", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/ghazi21a.html", "aff_unique_index": "0;0;0;1;0", "aff_unique_norm": "Google;University of Copenhagen", "aff_unique_dep": "Google Research;", "aff_unique_url": "https://research.google;https://www.ku.dk", "aff_unique_abbr": "Google;UCPH", "aff_campus_unique_index": "0;0;0;2", "aff_campus_unique": "Mountain View;;San Bruno", "aff_country_unique_index": "0;0;0;1;0", "aff_country_unique": "United States;Denmark" }, { "title": "Differentially Private Bayesian Inference for Generalized Linear Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9971", "id": "9971", "proceeding": "http://proceedings.mlr.press/v139/kulkarni21a.html", "slides": "", "author_site": "Tejas Kulkarni, Joonas J\u00e4lk\u00f6, Antti Koskela, Samuel Kaski, Antti Honkela", "author": "Tejas Kulkarni; Joonas J\u00e4lk\u00f6; Antti Koskela; Samuel Kaski; Antti Honkela", "abstract": "Generalized linear models (GLMs) such as logistic regression are among the most widely used arms in data analyst\u2019s repertoire and often used on sensitive datasets. 
A large body of prior works that investigate GLMs under differential privacy (DP) constraints provide only private point estimates of the regression coefficients, and are not able to quantify parameter uncertainty. In this work, with logistic and Poisson regression as running examples, we introduce a generic noise-aware DP Bayesian inference method for a GLM at hand, given a noisy sum of summary statistics. Quantifying uncertainty allows us to determine which of the regression coefficients are statistically significantly different from zero. We provide a previously unknown tight privacy analysis and experimentally demonstrate that the posteriors obtained from our model, while adhering to strong privacy guarantees, are close to the non-private posteriors.", "bibtex": "@InProceedings{pmlr-v139-kulkarni21a,\n title = \t {Differentially Private Bayesian Inference for Generalized Linear Models},\n author = {Kulkarni, Tejas and J{\\\"a}lk{\\\"o}, Joonas and Koskela, Antti and Kaski, Samuel and Honkela, Antti},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5838--5849},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kulkarni21a/kulkarni21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kulkarni21a.html},\n abstract = \t {Generalized linear models (GLMs) such as logistic regression are among the most widely used arms in data analyst\u2019s repertoire and often used on sensitive datasets. A large body of prior works that investigate GLMs under differential privacy (DP) constraints provide only private point estimates of the regression coefficients, and are not able to quantify parameter uncertainty. In this work, with logistic and Poisson regression as running examples, we introduce a generic noise-aware DP Bayesian inference method for a GLM at hand, given a noisy sum of summary statistics. Quantifying uncertainty allows us to determine which of the regression coefficients are statistically significantly different from zero. 
We provide a previously unknown tight privacy analysis and experimentally demonstrate that the posteriors obtained from our model, while adhering to strong privacy guarantees, are close to the non-private posteriors.}\n}", "pdf": "http://proceedings.mlr.press/v139/kulkarni21a/kulkarni21a.pdf", "supp": "", "pdf_size": 529284, "gs_citation": 44, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9022698201017188324&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Helsinki Institute for Information Technology HIIT, Department of Computer Science, Aalto University, Finland+Department of Computer Science, University of Manchester, United Kingdom; Helsinki Institute for Information Technology HIIT, Department of Computer Science, Aalto University, Finland; Helsinki Institute for Information Technology HIIT, Department of Computer Science, University of Helsinki, Finland; Helsinki Institute for Information Technology HIIT, Department of Computer Science, Aalto University, Finland+Department of Computer Science, University of Manchester, United Kingdom; Helsinki Institute for Information Technology HIIT, Department of Computer Science, University of Helsinki, Finland", "aff_domain": "gmail.com; ; ; ;helsinki.fi", "email": "gmail.com; ; ; ;helsinki.fi", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/kulkarni21a.html", "aff_unique_index": "0+1;0;2;0+1;2", "aff_unique_norm": "Aalto University;University of Manchester;University of Helsinki", "aff_unique_dep": "Department of Computer Science;Department of Computer Science;Department of Computer Science", "aff_unique_url": "https://www.aalto.fi;https://www.manchester.ac.uk;https://www.helsinki.fi", "aff_unique_abbr": "Aalto;UoM;UH", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Helsinki;", "aff_country_unique_index": "0+1;0;0;0+1;0", "aff_country_unique": "Finland;United Kingdom" }, { "title": "Differentially Private Correlation Clustering", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10369", "id": "10369", "proceeding": "http://proceedings.mlr.press/v139/bun21a.html", "slides": "/media/icml-2021/Slides/10369.pdf", "author_site": "Mark Bun, Marek Elias, Janardhan Kulkarni", "author": "Mark Bun; Marek Elias; Janardhan Kulkarni", "abstract": "Correlation clustering is a widely used technique in unsupervised machine learning. Motivated by applications where individual privacy is a concern, we initiate the study of differentially private correlation clustering. We propose an algorithm that achieves subquadratic additive error compared to the optimal cost. In contrast, straightforward adaptations of existing non-private algorithms all lead to a trivial quadratic error. 
Finally, we give a lower bound showing that any pure differentially private algorithm for correlation clustering requires additive error $\\Omega$(n).", "bibtex": "@InProceedings{pmlr-v139-bun21a,\n title = \t {Differentially Private Correlation Clustering},\n author = {Bun, Mark and Elias, Marek and Kulkarni, Janardhan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1136--1146},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bun21a/bun21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bun21a.html},\n abstract = \t {Correlation clustering is a widely used technique in unsupervised machine learning. Motivated by applications where individual privacy is a concern, we initiate the study of differentially private correlation clustering. We propose an algorithm that achieves subquadratic additive error compared to the optimal cost. In contrast, straightforward adaptations of existing non-private algorithms all lead to a trivial quadratic error. Finally, we give a lower bound showing that any pure differentially private algorithm for correlation clustering requires additive error $\\Omega$(n).}\n}", "pdf": "http://proceedings.mlr.press/v139/bun21a/bun21a.pdf", "supp": "", "pdf_size": 315694, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9991556528321413342&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Boston University; CWI, Amsterdam; Microsoft Research, Redmond", "aff_domain": "cwi.nl; ; ", "email": "cwi.nl; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/bun21a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Boston University;Centrum Wiskunde & Informatica;Microsoft", "aff_unique_dep": ";;Microsoft Research", "aff_unique_url": "https://www.bu.edu;https://www.cwi.nl;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "BU;CWI;MSR", "aff_campus_unique_index": "1;2", "aff_campus_unique": ";Amsterdam;Redmond", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United States;Netherlands" }, { "title": "Differentially Private Densest Subgraph Detection", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8641", "id": "8641", "proceeding": "http://proceedings.mlr.press/v139/nguyen21i.html", "slides": "", "author_site": "Dung Nguyen, Anil Vullikanti", "author": "Dung Nguyen; Anil Vullikanti", "abstract": "Densest subgraph detection is a fundamental graph mining problem, with a large number of applications. There has been a lot of work on efficient algorithms for finding the densest subgraph in massive networks. However, in many domains, the network is private, and returning a densest subgraph can reveal information about the network. Differential privacy is a powerful framework to handle such settings. We study the densest subgraph problem in the edge privacy model, in which the edges of the graph are private. We present the first sequential and parallel differentially private algorithms for this problem. We show that our algorithms have an additive approximation guarantee. 
We evaluate our algorithms on a large number of real-world networks, and observe a good privacy-accuracy tradeoff when the network has high density.", "bibtex": "@InProceedings{pmlr-v139-nguyen21i,\n title = \t {Differentially Private Densest Subgraph Detection},\n author = {Nguyen, Dung and Vullikanti, Anil},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8140--8151},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nguyen21i/nguyen21i.pdf},\n url = \t {https://proceedings.mlr.press/v139/nguyen21i.html},\n abstract = \t {Densest subgraph detection is a fundamental graph mining problem, with a large number of applications. There has been a lot of work on efficient algorithms for finding the densest subgraph in massive networks. However, in many domains, the network is private, and returning a densest subgraph can reveal information about the network. Differential privacy is a powerful framework to handle such settings. We study the densest subgraph problem in the edge privacy model, in which the edges of the graph are private. We present the first sequential and parallel differentially private algorithms for this problem. We show that our algorithms have an additive approximation guarantee. We evaluate our algorithms on a large number of real-world networks, and observe a good privacy-accuracy tradeoff when the network has high density.}\n}", "pdf": "http://proceedings.mlr.press/v139/nguyen21i/nguyen21i.pdf", "supp": "", "pdf_size": 1231616, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2476475746573409509&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, University of Virginia, Virginia, USA+Biocomplexity Institute and Initiative, University of Virginia, Virginia, USA; Department of Computer Science, University of Virginia, Virginia, USA+Biocomplexity Institute and Initiative, University of Virginia, Virginia, USA", "aff_domain": "virginia.edu; ", "email": "virginia.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/nguyen21i.html", "aff_unique_index": "0+0;0+0", "aff_unique_norm": "University of Virginia", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.virginia.edu", "aff_unique_abbr": "UVA", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Virginia", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "United States" }, { "title": "Differentially Private Quantiles", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8845", "id": "8845", "proceeding": "http://proceedings.mlr.press/v139/gillenwater21a.html", "slides": "", "author_site": "Jennifer Gillenwater, Matthew Joseph, Alex Kulesza", "author": "Jennifer Gillenwater; Matthew Joseph; Alex Kulesza", "abstract": "Quantiles are often used for summarizing and understanding data. If that data is sensitive, it may be necessary to compute quantiles in a way that is differentially private, providing theoretical guarantees that the result does not reveal private information.
However, when multiple quantiles are needed, existing differentially private algorithms fare poorly: they either compute quantiles individually, splitting the privacy budget, or summarize the entire distribution, wasting effort. In either case the result is reduced accuracy. In this work we propose an instance of the exponential mechanism that simultaneously estimates exactly $m$ quantiles from $n$ data points while guaranteeing differential privacy. The utility function is carefully structured to allow for an efficient implementation that returns estimates of all $m$ quantiles in time $O(mn\\log(n) + m^2n)$. Experiments show that our method significantly outperforms the current state of the art on both real and synthetic data while remaining efficient enough to be practical.", "bibtex": "@InProceedings{pmlr-v139-gillenwater21a,\n title = \t {Differentially Private Quantiles},\n author = {Gillenwater, Jennifer and Joseph, Matthew and Kulesza, Alex},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3713--3722},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gillenwater21a/gillenwater21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/gillenwater21a.html},\n abstract = \t {Quantiles are often used for summarizing and understanding data. If that data is sensitive, it may be necessary to compute quantiles in a way that is differentially private, providing theoretical guarantees that the result does not reveal private information. However, when multiple quantiles are needed, existing differentially private algorithms fare poorly: they either compute quantiles individually, splitting the privacy budget, or summarize the entire distribution, wasting effort. In either case the result is reduced accuracy. In this work we propose an instance of the exponential mechanism that simultaneously estimates exactly $m$ quantiles from $n$ data points while guaranteeing differential privacy. The utility function is carefully structured to allow for an efficient implementation that returns estimates of all $m$ quantiles in time $O(mn\\log(n) + m^2n)$. 
Experiments show that our method significantly outperforms the current state of the art on both real and synthetic data while remaining efficient enough to be practical.}\n}", "pdf": "http://proceedings.mlr.press/v139/gillenwater21a/gillenwater21a.pdf", "supp": "", "pdf_size": 481709, "gs_citation": 58, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15453581247929130350&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Google Research New York; Google Research New York; Google Research New York", "aff_domain": "google.com;google.com;google.com", "email": "google.com;google.com;google.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/gillenwater21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google Research", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "New York", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Differentially Private Query Release Through Adaptive Projection", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9359", "id": "9359", "proceeding": "http://proceedings.mlr.press/v139/aydore21a.html", "slides": "", "author_site": "Sergul Aydore, William Brown, Michael Kearns, Krishnaram Kenthapadi, Luca Melis, Aaron Roth, Ankit Siva", "author": "Sergul Aydore; William Brown; Michael Kearns; Krishnaram Kenthapadi; Luca Melis; Aaron Roth; Ankit A. Siva", "abstract": "We propose, implement, and evaluate a new algorithm for releasing answers to very large numbers of statistical queries like k-way marginals, subject to differential privacy. Our algorithm makes adaptive use of a continuous relaxation of the Projection Mechanism, which answers queries on the private dataset using simple perturbation, and then attempts to find the synthetic dataset that most closely matches the noisy answers. We use a continuous relaxation of the synthetic dataset domain which makes the projection loss differentiable, and allows us to use efficient ML optimization techniques and tooling. Rather than answering all queries up front, we make judicious use of our privacy budget by iteratively finding queries for which our (relaxed) synthetic data has high error, and then repeating the projection. Randomized rounding allows us to obtain synthetic data in the original schema. We perform experimental evaluations across a range of parameters and datasets, and find that our method outperforms existing algorithms on large query classes.", "bibtex": "@InProceedings{pmlr-v139-aydore21a,\n title = \t {Differentially Private Query Release Through Adaptive Projection},\n author = {Aydore, Sergul and Brown, William and Kearns, Michael and Kenthapadi, Krishnaram and Melis, Luca and Roth, Aaron and Siva, Ankit A},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {457--467},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/aydore21a/aydore21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/aydore21a.html},\n abstract = \t {We propose, implement, and evaluate a new algorithm for releasing answers to very large numbers of statistical queries like k-way marginals, subject to differential privacy.
Our algorithm makes adaptive use of a continuous relaxation of the Projection Mechanism, which answers queries on the private dataset using simple perturbation, and then attempts to find the synthetic dataset that most closely matches the noisy answers. We use a continuous relaxation of the synthetic dataset domain which makes the projection loss differentiable, and allows us to use efficient ML optimization techniques and tooling. Rather than answering all queries up front, we make judicious use of our privacy budget by iteratively finding queries for which our (relaxed) synthetic data has high error, and then repeating the projection. Randomized rounding allows us to obtain synthetic data in the original schema. We perform experimental evaluations across a range of parameters and datasets, and find that our method outperforms existing algorithms on large query classes.}\n}", "pdf": "http://proceedings.mlr.press/v139/aydore21a/aydore21a.pdf", "supp": "", "pdf_size": 700398, "gs_citation": 95, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6645658833655661139&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Amazon Web Services AI/ML; Columbia University, New York, NY, USA+University of Pennsylvania, Philadelphia, PA, USA; University of Pennsylvania, Philadelphia, PA, USA; Amazon Web Services AI/ML; University of Pennsylvania, Philadelphia, PA, USA; University of Pennsylvania, Philadelphia, PA, USA+Amazon Web Services AI/ML; Amazon Web Services AI/ML", "aff_domain": "amazon.com;columbia.edu;cis.upenn.edu;amazon.com;cis.upenn.edu;cis.upenn.edu;amazon.com", "email": "amazon.com;columbia.edu;cis.upenn.edu;amazon.com;cis.upenn.edu;cis.upenn.edu;amazon.com", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/aydore21a.html", "aff_unique_index": "0;1+2;2;0;2;2+0;0", "aff_unique_norm": "Amazon;Columbia University;University of Pennsylvania", "aff_unique_dep": "AI/ML;;", "aff_unique_url": "https://aws.amazon.com/machine-learning;https://www.columbia.edu;https://www.upenn.edu", "aff_unique_abbr": "AWS AI/ML;Columbia;UPenn", "aff_campus_unique_index": "1+2;2;2;2", "aff_campus_unique": ";New York;Philadelphia", "aff_country_unique_index": "0;0+0;0;0;0;0+0;0", "aff_country_unique": "United States" }, { "title": "Differentially Private Sliced Wasserstein Distance", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10729", "id": "10729", "proceeding": "http://proceedings.mlr.press/v139/rakotomamonjy21a.html", "slides": "/media/icml-2021/Slides/10729.pdf", "author_site": "alain rakotomamonjy, Ralaivola Liva", "author": "Alain Rakotomamonjy; Ralaivola Liva", "abstract": "Developing machine learning methods that are privacy preserving is today a central topic of research, with huge practical impacts. Among the numerous ways to address privacy-preserving learning, we here take the perspective of computing the divergences between distributions under the Differential Privacy (DP) framework \u2014 being able to compute divergences between distributions is pivotal for many machine learning problems, such as learning generative models or domain adaptation problems. Instead of resorting to the popular gradient-based sanitization method for DP, we tackle the problem at its roots by focusing on the Sliced Wasserstein Distance and seamlessly making it differentially private.
Our main contribution is as follows: we analyze the property of adding a Gaussian perturbation to the intrinsic randomized mechanism of the Sliced Wasserstein Distance, and we establish the sensitivity of the resulting differentially private mechanism. One of our important findings is that this DP mechanism transforms the Sliced Wasserstein distance into another distance, that we call the Smoothed Sliced Wasserstein Distance. This new differentially private distribution distance can be plugged into generative models and domain adaptation algorithms in a transparent way, and we empirically show that it yields highly competitive performance compared with gradient-based DP approaches from the literature, with almost no loss in accuracy for the domain adaptation problems that we consider.", "bibtex": "@InProceedings{pmlr-v139-rakotomamonjy21a,\n title = \t {Differentially Private Sliced Wasserstein Distance},\n author = {Rakotomamonjy, Alain and Liva, Ralaivola},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8810--8820},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/rakotomamonjy21a/rakotomamonjy21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/rakotomamonjy21a.html},\n abstract = \t {Developing machine learning methods that are privacy preserving is today a central topic of research, with huge practical impacts. Among the numerous ways to address privacy-preserving learning, we here take the perspective of computing the divergences between distributions under the Differential Privacy (DP) framework \u2014 being able to compute divergences between distributions is pivotal for many machine learning problems, such as learning generative models or domain adaptation problems. Instead of resorting to the popular gradient-based sanitization method for DP, we tackle the problem at its roots by focusing on the Sliced Wasserstein Distance and seamlessly making it differentially private. Our main contribution is as follows: we analyze the property of adding a Gaussian perturbation to the intrinsic randomized mechanism of the Sliced Wasserstein Distance, and we establish the sensitivity of the resulting differentially private mechanism. One of our important findings is that this DP mechanism transforms the Sliced Wasserstein distance into another distance, that we call the Smoothed Sliced Wasserstein Distance. 
This new differentially private distribution distance can be plugged into generative models and domain adaptation algorithms in a transparent way, and we empirically show that it yields highly competitive performance compared with gradient-based DP approaches from the literature, with almost no loss in accuracy for the domain adaptation problems that we consider.}\n}", "pdf": "http://proceedings.mlr.press/v139/rakotomamonjy21a/rakotomamonjy21a.pdf", "supp": "", "pdf_size": 1866707, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11153564524741628543&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Criteo AI Lab, Paris, France+LITIS EA4108, Universit\u00e9 de Rouen Normandie, Saint-Etienne du Rouvray, France; Criteo AI Lab, Paris, France", "aff_domain": "insa-rouen.fr; ", "email": "insa-rouen.fr; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/rakotomamonjy21a.html", "aff_unique_index": "0+1;0", "aff_unique_norm": "Criteo;Universit\u00e9 de Rouen Normandie", "aff_unique_dep": "Criteo AI Lab;LITIS EA4108", "aff_unique_url": "https://www.criteo.com;https://www.univ-rouen.fr", "aff_unique_abbr": "Criteo;", "aff_campus_unique_index": "0+1;0", "aff_campus_unique": "Paris;Saint-Etienne du Rouvray", "aff_country_unique_index": "0+0;0", "aff_country_unique": "France" }, { "title": "Differentially-Private Clustering of Easy Instances", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8567", "id": "8567", "proceeding": "http://proceedings.mlr.press/v139/cohen21c.html", "slides": "/media/icml-2021/Slides/8567.pdf", "author_site": "Edith Cohen, Haim Kaplan, Yishay Mansour, Uri Stemmer, Eliad Tsfadia", "author": "Edith Cohen; Haim Kaplan; Yishay Mansour; Uri Stemmer; Eliad Tsfadia", "abstract": "Clustering is a fundamental problem in data analysis. In differentially private clustering, the goal is to identify k cluster centers without disclosing information on individual data points. Despite significant research progress, the problem had so far resisted practical solutions. In this work we aim at providing simple, implementable differentially private clustering algorithms when the data is \"easy,\" e.g., when there exists a significant separation between the clusters. For the easy instances we consider, we have a simple implementation based on utilizing non-private clustering algorithms, and combining them privately. We are able to get improved sample complexity bounds in some cases of Gaussian mixtures and k-means. We complement our theoretical algorithms with experiments of simulated data.", "bibtex": "@InProceedings{pmlr-v139-cohen21c,\n title = \t {Differentially-Private Clustering of Easy Instances},\n author = {Cohen, Edith and Kaplan, Haim and Mansour, Yishay and Stemmer, Uri and Tsfadia, Eliad},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2049--2059},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cohen21c/cohen21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/cohen21c.html},\n abstract = \t {Clustering is a fundamental problem in data analysis. In differentially private clustering, the goal is to identify k cluster centers without disclosing information on individual data points.
Despite significant research progress, the problem had so far resisted practical solutions. In this work we aim at providing simple, implementable differentially private clustering algorithms when the data is \"easy,\" e.g., when there exists a significant separation between the clusters. For the easy instances we consider, we have a simple implementation based on utilizing non-private clustering algorithms, and combining them privately. We are able to get improved sample complexity bounds in some cases of Gaussian mixtures and k-means. We complement our theoretical algorithms with experiments of simulated data.}\n}", "pdf": "http://proceedings.mlr.press/v139/cohen21c/cohen21c.pdf", "supp": "", "pdf_size": 556848, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10100985380971871514&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Google Research + Blavatnik School of Computer Science, Tel Aviv University; Google Research + Blavatnik School of Computer Science, Tel Aviv University; Google Research + Blavatnik School of Computer Science, Tel Aviv University; Google Research + Ben-Gurion University; Google Research + Blavatnik School of Computer Science, Tel Aviv University", "aff_domain": "google.com;tau.ac.il;tau.ac.il;post.bgu.ac.il;gmail.com", "email": "google.com;tau.ac.il;tau.ac.il;post.bgu.ac.il;gmail.com", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/cohen21c.html", "aff_unique_index": "0+1;0+1;0+1;0+2;0+1", "aff_unique_norm": "Google;Tel Aviv University;Ben-Gurion University of the Negev", "aff_unique_dep": "Google Research;Blavatnik School of Computer Science;", "aff_unique_url": "https://research.google;https://www.tau.ac.il;https://www.bgu.ac.il", "aff_unique_abbr": "Google Research;TAU;BGU", "aff_campus_unique_index": "0+1;0+1;0+1;0;0+1", "aff_campus_unique": "Mountain View;Tel Aviv;", "aff_country_unique_index": "0+1;0+1;0+1;0+1;0+1", "aff_country_unique": "United States;Israel" }, { "title": "Diffusion Earth Mover\u2019s Distance and Distribution Embeddings", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9747", "id": "9747", "proceeding": "http://proceedings.mlr.press/v139/tong21a.html", "slides": "/media/icml-2021/Slides/9747.pdf", "author_site": "Alexander Tong, Guillaume Huguet, Amine Natik, Kincaid Macdonald, MANIK KUCHROO, Ronald Coifman, Guy Wolf, Smita Krishnaswamy", "author": "Alexander Y Tong; Guillaume Huguet; Amine Natik; Kincaid Macdonald; Manik Kuchroo; Ronald Coifman; Guy Wolf; Smita Krishnaswamy", "abstract": "We propose a new fast method of measuring distances between large numbers of related high dimensional datasets called the Diffusion Earth Mover\u2019s Distance (EMD). We model the datasets as distributions supported on common data graph that is derived from the affinity matrix computed on the combined data. In such cases where the graph is a discretization of an underlying Riemannian closed manifold, we prove that Diffusion EMD is topologically equivalent to the standard EMD with a geodesic ground distance. Diffusion EMD can be computed in {\u00d5}(n) time and is more accurate than similarly fast algorithms such as tree-based EMDs. We also show Diffusion EMD is fully differentiable, making it amenable to future uses in gradient-descent frameworks such as deep neural networks. Finally, we demonstrate an application of Diffusion EMD to single cell data collected from 210 COVID-19 patient samples at Yale New Haven Hospital.
Here, Diffusion EMD can derive distances between patients on the manifold of cells at least two orders of magnitude faster than equally accurate methods. This distance matrix between patients can be embedded into a higher level patient manifold which uncovers structure and heterogeneity in patients. More generally, Diffusion EMD is applicable to all datasets that are massively collected in parallel in many medical and biological systems.", "bibtex": "@InProceedings{pmlr-v139-tong21a,\n title = \t {Diffusion Earth Mover\u2019s Distance and Distribution Embeddings},\n author = {Tong, Alexander Y and Huguet, Guillaume and Natik, Amine and Macdonald, Kincaid and Kuchroo, Manik and Coifman, Ronald and Wolf, Guy and Krishnaswamy, Smita},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10336--10346},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tong21a/tong21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/tong21a.html},\n abstract = \t {We propose a new fast method of measuring distances between large numbers of related high dimensional datasets called the Diffusion Earth Mover\u2019s Distance (EMD). We model the datasets as distributions supported on common data graph that is derived from the affinity matrix computed on the combined data. In such cases where the graph is a discretization of an underlying Riemannian closed manifold, we prove that Diffusion EMD is topologically equivalent to the standard EMD with a geodesic ground distance. Diffusion EMD can be computed in {\u00d5}(n) time and is more accurate than similarly fast algorithms such as tree-based EMDs. We also show Diffusion EMD is fully differentiable, making it amenable to future uses in gradient-descent frameworks such as deep neural networks. Finally, we demonstrate an application of Diffusion EMD to single cell data collected from 210 COVID-19 patient samples at Yale New Haven Hospital. Here, Diffusion EMD can derive distances between patients on the manifold of cells at least two orders of magnitude faster than equally accurate methods. This distance matrix between patients can be embedded into a higher level patient manifold which uncovers structure and heterogeneity in patients. More generally, Diffusion EMD is applicable to all datasets that are massively collected in parallel in many medical and biological systems.}\n}", "pdf": "http://proceedings.mlr.press/v139/tong21a/tong21a.pdf", "supp": "", "pdf_size": 3201012, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6474297301621856749&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Dept. of Comp. Sci., Yale University; Dept. of Math. & Stat., Universit\u00e9 de Montr\u00e9al + Mila \u2013 Quebec AI Institute; Dept. of Math. & Stat., Universit\u00e9 de Montr\u00e9al + Mila \u2013 Quebec AI Institute; Dept. of Math., Yale University; Department of Genetics, Yale University; Dept. of Math., Yale University; Dept. of Math.
& Stat., Universit\u00e9 de Montr\u00e9al + Mila \u2013 Quebec AI Institute; Department of Genetics, Yale University", "aff_domain": "yale.edu; ; ; ; ; ; ;yale.edu", "email": "yale.edu; ; ; ; ; ; ;yale.edu", "github": "https://github.com/KrishnaswamyLab/DiffusionEMD", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/tong21a.html", "aff_unique_index": "0;1+2;1+2;0;0;0;1+2;0", "aff_unique_norm": "Yale University;Universit\u00e9 de Montr\u00e9al;Quebec AI Institute", "aff_unique_dep": "Department of Computer Science;Dept. of Math. & Stat.;AI", "aff_unique_url": "https://www.yale.edu;https://www.umontreal.ca;https://mila.quebec", "aff_unique_abbr": "Yale;UdeM;Mila", "aff_campus_unique_index": "0;;;0;0;", "aff_campus_unique": "New Haven;", "aff_country_unique_index": "0;1+1;1+1;0;0;0;1+1;0", "aff_country_unique": "United States;Canada" }, { "title": "Diffusion Source Identification on Networks with Statistical Confidence", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9357", "id": "9357", "proceeding": "http://proceedings.mlr.press/v139/dawkins21a.html", "slides": "", "author_site": "Quinlan Dawkins, Tianxi Li, Haifeng Xu", "author": "Quinlan E Dawkins; Tianxi Li; Haifeng Xu", "abstract": "Diffusion source identification on networks is a problem of fundamental importance in a broad class of applications, including controlling the spreading of rumors on social media, identifying a computer virus over cyber networks, or identifying the disease center during epidemiology.
Though this problem has received significant recent attention, most known approaches are well-studied in only very restrictive settings and lack theoretical guarantees for more realistic networks. We introduce a statistical framework for the study of this problem and develop a confidence set inference approach inspired by hypothesis testing. Our method efficiently produces a small subset of nodes, which provably covers the source node with any pre-specified confidence level without restrictive assumptions on network structures. To our knowledge, this is the first diffusion source identification method with a practically useful theoretical guarantee on general networks. We demonstrate our approach via extensive synthetic experiments on well-known random network models, a large data set of real-world networks as well as a mobility network between cities concerning the COVID-19 spreading in January 2020.}\n}", "pdf": "http://proceedings.mlr.press/v139/dawkins21a/dawkins21a.pdf", "supp": "", "pdf_size": 580694, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10980983164075964289&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/dawkins21a.html" }, { "title": "Dimensionality Reduction for the Sum-of-Distances Metric", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9299", "id": "9299", "proceeding": "http://proceedings.mlr.press/v139/feng21a.html", "slides": "/media/icml-2021/Slides/9299.pdf", "author_site": "Zhili Feng, Praneeth Kacham, David Woodruff", "author": "Zhili Feng; Praneeth Kacham; David Woodruff", "abstract": "We give a dimensionality reduction procedure to approximate the sum of distances of a given set of $n$ points in $R^d$ to any \u201cshape\u201d that lies in a $k$-dimensional subspace. Here, by \u201cshape\u201d we mean any set of points in $R^d$. Our algorithm takes an input in the form of an $n \\times d$ matrix $A$, where each row of $A$ denotes a data point, and outputs a subspace $P$ of dimension $O(k^{3}/\\epsilon^6)$ such that the projections of each of the $n$ points onto the subspace $P$ and the distances of each of the points to the subspace $P$ are sufficient to obtain an $\\epsilon$-approximation to the sum of distances to any arbitrary shape that lies in a $k$-dimensional subspace of $R^d$. These include important problems such as $k$-median, $k$-subspace approximation, and $(j,l)$ subspace clustering with $j \\cdot l \\leq k$. Dimensionality reduction reduces the data storage requirement to $(n+d)k^{3}/\\epsilon^6$ from nnz$(A)$. Here nnz$(A)$ could potentially be as large as $nd$. Our algorithm runs in time nnz$(A)/\\epsilon^2 + (n+d)$poly$(k/\\epsilon)$, up to logarithmic factors. For dense matrices, where nnz$(A) \\approx nd$, we give a faster algorithm, that runs in time $nd + (n+d)$poly$(k/\\epsilon)$ up to logarithmic factors. 
Our dimensionality reduction algorithm can also be used to obtain poly$(k/\\epsilon)$ size coresets for $k$-median and $(k,1)$-subspace approximation problems in polynomial time.", "bibtex": "@InProceedings{pmlr-v139-feng21a,\n title = \t {Dimensionality Reduction for the Sum-of-Distances Metric},\n author = {Feng, Zhili and Kacham, Praneeth and Woodruff, David},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3220--3229},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/feng21a/feng21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/feng21a.html},\n abstract = \t {We give a dimensionality reduction procedure to approximate the sum of distances of a given set of $n$ points in $R^d$ to any \u201cshape\u201d that lies in a $k$-dimensional subspace. Here, by \u201cshape\u201d we mean any set of points in $R^d$. Our algorithm takes an input in the form of an $n \\times d$ matrix $A$, where each row of $A$ denotes a data point, and outputs a subspace $P$ of dimension $O(k^{3}/\\epsilon^6)$ such that the projections of each of the $n$ points onto the subspace $P$ and the distances of each of the points to the subspace $P$ are sufficient to obtain an $\\epsilon$-approximation to the sum of distances to any arbitrary shape that lies in a $k$-dimensional subspace of $R^d$. These include important problems such as $k$-median, $k$-subspace approximation, and $(j,l)$ subspace clustering with $j \\cdot l \\leq k$. Dimensionality reduction reduces the data storage requirement to $(n+d)k^{3}/\\epsilon^6$ from nnz$(A)$. Here nnz$(A)$ could potentially be as large as $nd$. Our algorithm runs in time nnz$(A)/\\epsilon^2 + (n+d)$poly$(k/\\epsilon)$, up to logarithmic factors. For dense matrices, where nnz$(A) \\approx nd$, we give a faster algorithm, that runs in time $nd + (n+d)$poly$(k/\\epsilon)$ up to logarithmic factors. 
Our dimensionality reduction algorithm can also be used to obtain poly$(k/\\epsilon)$ size coresets for $k$-median and $(k,1)$-subspace approximation problems in polynomial time.}\n}", "pdf": "http://proceedings.mlr.press/v139/feng21a/feng21a.pdf", "supp": "", "pdf_size": 3190052, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4079156533032090250&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Carnegie Mellon University; Carnegie Mellon University; Carnegie Mellon University", "aff_domain": "cs.cmu.edu; ; ", "email": "cs.cmu.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/feng21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Directed Graph Embeddings in Pseudo-Riemannian Manifolds", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10411", "id": "10411", "proceeding": "http://proceedings.mlr.press/v139/sim21a.html", "slides": "/media/icml-2021/Slides/10411.pdf", "author_site": "Aaron Sim, Maciej Wiatrak, Angus Brayne, P\u00e1id\u00ed Creed, Saee Paliwal", "author": "Aaron Sim; Maciej L Wiatrak; Angus Brayne; Paidi Creed; Saee Paliwal", "abstract": "The inductive biases of graph representation learning algorithms are often encoded in the background geometry of their embedding space. In this paper, we show that general directed graphs can be effectively represented by an embedding model that combines three components: a pseudo-Riemannian metric structure, a non-trivial global topology, and a unique likelihood function that explicitly incorporates a preferred direction in embedding space. We demonstrate the representational capabilities of this method by applying it to the task of link prediction on a series of synthetic and real directed graphs from natural language applications and biology. In particular, we show that low-dimensional cylindrical Minkowski and anti-de Sitter spacetimes can produce equal or better graph representations than curved Riemannian manifolds of higher dimensions.", "bibtex": "@InProceedings{pmlr-v139-sim21a,\n title = \t {Directed Graph Embeddings in Pseudo-Riemannian Manifolds},\n author = {Sim, Aaron and Wiatrak, Maciej L and Brayne, Angus and Creed, Paidi and Paliwal, Saee},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9681--9690},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sim21a/sim21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/sim21a.html},\n abstract = \t {The inductive biases of graph representation learning algorithms are often encoded in the background geometry of their embedding space. In this paper, we show that general directed graphs can be effectively represented by an embedding model that combines three components: a pseudo-Riemannian metric structure, a non-trivial global topology, and a unique likelihood function that explicitly incorporates a preferred direction in embedding space. 
We demonstrate the representational capabilities of this method by applying it to the task of link prediction on a series of synthetic and real directed graphs from natural language applications and biology. In particular, we show that low-dimensional cylindrical Minkowski and anti-de Sitter spacetimes can produce equal or better graph representations than curved Riemannian manifolds of higher dimensions.}\n}", "pdf": "http://proceedings.mlr.press/v139/sim21a/sim21a.pdf", "supp": "", "pdf_size": 3487547, "gs_citation": 19, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15546935307894768775&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "BenevolentAI, London, United Kingdom; BenevolentAI, London, United Kingdom; BenevolentAI, London, United Kingdom; BenevolentAI, London, United Kingdom; BenevolentAI, London, United Kingdom", "aff_domain": "benevolent.ai; ; ; ; ", "email": "benevolent.ai; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/sim21a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "BenevolentAI", "aff_unique_dep": "", "aff_unique_url": "https://www.benevolent.ai", "aff_unique_abbr": "", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Directional Bias Amplification", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10553", "id": "10553", "proceeding": "http://proceedings.mlr.press/v139/wang21t.html", "slides": "", "author_site": "Angelina Wang, Olga Russakovsky", "author": "Angelina Wang; Olga Russakovsky", "abstract": "Mitigating bias in machine learning systems requires refining our understanding of bias propagation pathways: from societal structures to large-scale data to trained models to impact on society. In this work, we focus on one aspect of the problem, namely bias amplification: the tendency of models to amplify the biases present in the data they are trained on. A metric for measuring bias amplification was introduced in the seminal work by Zhao et al. (2017); however, as we demonstrate, this metric suffers from a number of shortcomings including conflating different types of bias amplification and failing to account for varying base rates of protected attributes. We introduce and analyze a new, decoupled metric for measuring bias amplification, $BiasAmp_{\\rightarrow}$ (Directional Bias Amplification). We thoroughly analyze and discuss both the technical assumptions and normative implications of this metric. We provide suggestions about its measurement by cautioning against predicting sensitive attributes, encouraging the use of confidence intervals due to fluctuations in the fairness of models across runs, and discussing the limitations of what this metric captures. Throughout this paper, we work to provide an interrogative look at the technical measurement of bias amplification, guided by our normative ideas of what we want it to encompass. 
Code is located at https://github.com/princetonvisualai/directional-bias-amp.", "bibtex": "@InProceedings{pmlr-v139-wang21t,\n title = \t {Directional Bias Amplification},\n author = {Wang, Angelina and Russakovsky, Olga},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10882--10893},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21t/wang21t.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21t.html},\n abstract = \t {Mitigating bias in machine learning systems requires refining our understanding of bias propagation pathways: from societal structures to large-scale data to trained models to impact on society. In this work, we focus on one aspect of the problem, namely bias amplification: the tendency of models to amplify the biases present in the data they are trained on. A metric for measuring bias amplification was introduced in the seminal work by Zhao et al. (2017); however, as we demonstrate, this metric suffers from a number of shortcomings including conflating different types of bias amplification and failing to account for varying base rates of protected attributes. We introduce and analyze a new, decoupled metric for measuring bias amplification, $BiasAmp_{\\rightarrow}$ (Directional Bias Amplification). We thoroughly analyze and discuss both the technical assumptions and normative implications of this metric. We provide suggestions about its measurement by cautioning against predicting sensitive attributes, encouraging the use of confidence intervals due to fluctuations in the fairness of models across runs, and discussing the limitations of what this metric captures. Throughout this paper, we work to provide an interrogative look at the technical measurement of bias amplification, guided by our normative ideas of what we want it to encompass. 
Code is located at https://github.com/princetonvisualai/directional-bias-amp.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21t/wang21t.pdf", "supp": "", "pdf_size": 2877184, "gs_citation": 85, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16389460185229956032&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Princeton University; Princeton University", "aff_domain": "princeton.edu; ", "email": "princeton.edu; ", "github": "https://github.com/princetonvisualai/directional-bias-amp", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/wang21t.html", "aff_unique_index": "0;0", "aff_unique_norm": "Princeton University", "aff_unique_dep": "", "aff_unique_url": "https://www.princeton.edu", "aff_unique_abbr": "Princeton", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Directional Graph Networks", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10053", "id": "10053", "proceeding": "http://proceedings.mlr.press/v139/beani21a.html", "slides": "/media/icml-2021/Slides/10053.pdf", "author_site": "Dominique Beaini, Saro Passaro, Vincent L\u00e9tourneau, Will Hamilton, Gabriele Corso, Pietro Li\u00f3", "author": "Dominique Beaini; Saro Passaro; Vincent L\u00e9tourneau; Will Hamilton; Gabriele Corso; Pietro Li\u00f3", "abstract": "The lack of anisotropic kernels in graph neural networks (GNNs) strongly limits their expressiveness, contributing to well-known issues such as over-smoothing. To overcome this limitation, we propose the first globally consistent anisotropic kernels for GNNs, allowing for graph convolutions that are defined according to topologically-derived directional flows. First, by defining a vector field in the graph, we develop a method of applying directional derivatives and smoothing by projecting node-specific messages into the field. Then, we propose the use of the Laplacian eigenvectors as such vector field. We show that the method generalizes CNNs on an $n$-dimensional grid and is provably more discriminative than standard GNNs regarding the Weisfeiler-Lehman 1-WL test. We evaluate our method on different standard benchmarks and see a relative error reduction of 8% on the CIFAR10 graph dataset and 11% to 32% on the molecular ZINC dataset, and a relative increase in precision of 1.6% on the MolPCBA dataset. An important outcome of this work is that it enables graph networks to embed directions in an unsupervised way, thus allowing a better representation of the anisotropic features in different physical or biological problems.", "bibtex": "@InProceedings{pmlr-v139-beaini21a,\n title = \t {Directional Graph Networks},\n author = {Beaini, Dominique and Passaro, Saro and L{\\'e}tourneau, Vincent and Hamilton, Will and Corso, Gabriele and Li{\\'o}, Pietro},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {748--758},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/beaini21a/beaini21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/beaini21a.html},\n abstract = \t {The lack of anisotropic kernels in graph neural networks (GNNs) strongly limits their expressiveness, contributing to well-known issues such as over-smoothing.
To overcome this limitation, we propose the first globally consistent anisotropic kernels for GNNs, allowing for graph convolutions that are defined according to topologically-derived directional flows. First, by defining a vector field in the graph, we develop a method of applying directional derivatives and smoothing by projecting node-specific messages into the field. Then, we propose the use of the Laplacian eigenvectors as such vector field. We show that the method generalizes CNNs on an $n$-dimensional grid and is provably more discriminative than standard GNNs regarding the Weisfeiler-Lehman 1-WL test. We evaluate our method on different standard benchmarks and see a relative error reduction of 8% on the CIFAR10 graph dataset and 11% to 32% on the molecular ZINC dataset, and a relative increase in precision of 1.6% on the MolPCBA dataset. An important outcome of this work is that it enables graph networks to embed directions in an unsupervised way, thus allowing a better representation of the anisotropic features in different physical or biological problems.}\n}", "pdf": "http://proceedings.mlr.press/v139/beaini21a/beaini21a.pdf", "supp": "", "pdf_size": 2189369, "gs_citation": 225, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6256455976929564913&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": ";;;;;", "aff_domain": ";;;;;", "email": ";;;;;", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/beaini21a.html" }, { "title": "Disambiguation of Weak Supervision leading to Exponential Convergence rates", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10613", "id": "10613", "proceeding": "http://proceedings.mlr.press/v139/cabannnes21a.html", "slides": "", "author_site": "Vivien Cabannnes, Francis Bach, Alessandro Rudi", "author": "Vivien A Cabannnes; Francis Bach; Alessandro Rudi", "abstract": "Machine learning approached through supervised learning requires expensive annotation of data. This motivates weakly supervised learning, where data are annotated with incomplete yet discriminative information.
In this paper, we focus on partial labelling, an instance of weak supervision where, from a given input, we are given a set of potential targets. We review a disambiguation principle to recover full supervision from weak supervision, and propose an empirical disambiguation algorithm. We prove exponential convergence rates of our algorithm under classical learnability assumptions, and we illustrate the usefulness of our method on practical examples.}\n}", "pdf": "http://proceedings.mlr.press/v139/cabannnes21a/cabannnes21a.pdf", "supp": "", "pdf_size": 535226, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7442145919088745696&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Institut National de Recherche en Informatique et en Automatique \u2013 D\u00e9partement d\u2019Informatique de l\u2019\u00c9cole Normale Sup\u00e9rieure \u2013 PSL Research University; Institut National de Recherche en Informatique et en Automatique \u2013 D\u00e9partement d\u2019Informatique de l\u2019\u00c9cole Normale Sup\u00e9rieure \u2013 PSL Research University; Institut National de Recherche en Informatique et en Automatique \u2013 D\u00e9partement d\u2019Informatique de l\u2019\u00c9cole Normale Sup\u00e9rieure \u2013 PSL Research University", "aff_domain": "gmail.com; ; ", "email": "gmail.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/cabannnes21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Institut National de Recherche en Informatique et en Automatique", "aff_unique_dep": "D\u00e9partement d\u2019Informatique de l\u2019\u00c9cole Normale Sup\u00e9rieure", "aff_unique_url": "https://www.inria.fr", "aff_unique_abbr": "INRIA", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "France" }, { "title": "Discovering symbolic policies with deep reinforcement learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9985", "id": "9985", "proceeding": "http://proceedings.mlr.press/v139/landajuela21a.html", "slides": "", "author_site": "Mikel Landajuela Larma, Brenden Petersen, Sookyung Kim, Claudio Santiago, Ruben Glatt, Nathan Mundhenk, Jacob Pettit, Daniel Faissol", "author": "Mikel Landajuela; Brenden K Petersen; Sookyung Kim; Claudio P Santiago; Ruben Glatt; Nathan Mundhenk; Jacob F Pettit; Daniel Faissol", "abstract": "Deep reinforcement learning (DRL) has proven successful for many difficult control problems by learning policies represented by neural networks. However, the complexity of neural network-based policies{\u2014}involving thousands of composed non-linear operators{\u2014}can render them problematic to understand, trust, and deploy. In contrast, simple policies comprising short symbolic expressions can facilitate human understanding, while also being transparent and exhibiting predictable behavior. To this end, we propose deep symbolic policy, a novel approach to directly search the space of symbolic policies. We use an autoregressive recurrent neural network to generate control policies represented by tractable mathematical expressions, employing a risk-seeking policy gradient to maximize performance of the generated policies. To scale to environments with multi-dimensional action spaces, we propose an \"anchoring\" algorithm that distills pre-trained neural network-based policies into fully symbolic policies, one action dimension at a time. 
We also introduce two novel methods to improve exploration in DRL-based combinatorial optimization, building on ideas of entropy regularization and distribution initialization. Despite their dramatically reduced complexity, we demonstrate that discovered symbolic policies outperform seven state-of-the-art DRL algorithms in terms of average rank and average normalized episodic reward across eight benchmark environments.", "bibtex": "@InProceedings{pmlr-v139-landajuela21a,\n title = \t {Discovering symbolic policies with deep reinforcement learning},\n author = {Landajuela, Mikel and Petersen, Brenden K and Kim, Sookyung and Santiago, Claudio P and Glatt, Ruben and Mundhenk, Nathan and Pettit, Jacob F and Faissol, Daniel},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5979--5989},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/landajuela21a/landajuela21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/landajuela21a.html},\n abstract = \t {Deep reinforcement learning (DRL) has proven successful for many difficult control problems by learning policies represented by neural networks. However, the complexity of neural network-based policies{\u2014}involving thousands of composed non-linear operators{\u2014}can render them problematic to understand, trust, and deploy. In contrast, simple policies comprising short symbolic expressions can facilitate human understanding, while also being transparent and exhibiting predictable behavior. To this end, we propose deep symbolic policy, a novel approach to directly search the space of symbolic policies. We use an autoregressive recurrent neural network to generate control policies represented by tractable mathematical expressions, employing a risk-seeking policy gradient to maximize performance of the generated policies. To scale to environments with multi-dimensional action spaces, we propose an \"anchoring\" algorithm that distills pre-trained neural network-based policies into fully symbolic policies, one action dimension at a time. We also introduce two novel methods to improve exploration in DRL-based combinatorial optimization, building on ideas of entropy regularization and distribution initialization. 
Despite their dramatically reduced complexity, we demonstrate that discovered symbolic policies outperform seven state-of-the-art DRL algorithms in terms of average rank and average normalized episodic reward across eight benchmark environments.}\n}", "pdf": "http://proceedings.mlr.press/v139/landajuela21a/landajuela21a.pdf", "supp": "", "pdf_size": 1716793, "gs_citation": 135, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13358753313177642381&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Lawrence Livermore National Laboratory; Lawrence Livermore National Laboratory; Lawrence Livermore National Laboratory; Lawrence Livermore National Laboratory; Lawrence Livermore National Laboratory; Lawrence Livermore National Laboratory; Lawrence Livermore National Laboratory; Lawrence Livermore National Laboratory", "aff_domain": "llnl.gov;llnl.gov;llnl.gov;llnl.gov;llnl.gov;llnl.gov;llnl.gov;llnl.gov", "email": "llnl.gov;llnl.gov;llnl.gov;llnl.gov;llnl.gov;llnl.gov;llnl.gov;llnl.gov", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/landajuela21a.html", "aff_unique_index": "0;0;0;0;0;0;0;0", "aff_unique_norm": "Lawrence Livermore National Laboratory", "aff_unique_dep": "", "aff_unique_url": "https://www.llnl.gov", "aff_unique_abbr": "LLNL", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Discrete-Valued Latent Preference Matrix Estimation with Graph Side Information", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10051", "id": "10051", "proceeding": "http://proceedings.mlr.press/v139/jo21a.html", "slides": "", "author_site": "Changhun Jo, Kangwook Lee", "author": "Changhun Jo; Kangwook Lee", "abstract": "Incorporating graph side information into recommender systems has been widely used to better predict ratings, but relatively few works have focused on theoretical guarantees. Ahn et al. (2018) firstly characterized the optimal sample complexity in the presence of graph side information, but the results are limited due to strict, unrealistic assumptions made on the unknown latent preference matrix and the structure of user clusters. In this work, we propose a new model in which 1) the unknown latent preference matrix can have any discrete values, and 2) users can be clustered into multiple clusters, thereby relaxing the assumptions made in prior work. Under this new model, we fully characterize the optimal sample complexity and develop a computationally-efficient algorithm that matches the optimal sample complexity. 
Our algorithm is robust to model errors and outperforms the existing algorithms in terms of prediction performance on both synthetic and real data.", "bibtex": "@InProceedings{pmlr-v139-jo21a,\n title = \t {Discrete-Valued Latent Preference Matrix Estimation with Graph Side Information},\n author = {Jo, Changhun and Lee, Kangwook},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5107--5117},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jo21a/jo21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jo21a.html},\n abstract = \t {Incorporating graph side information into recommender systems has been widely used to better predict ratings, but relatively few works have focused on theoretical guarantees. Ahn et al. (2018) firstly characterized the optimal sample complexity in the presence of graph side information, but the results are limited due to strict, unrealistic assumptions made on the unknown latent preference matrix and the structure of user clusters. In this work, we propose a new model in which 1) the unknown latent preference matrix can have any discrete values, and 2) users can be clustered into multiple clusters, thereby relaxing the assumptions made in prior work. Under this new model, we fully characterize the optimal sample complexity and develop a computationally-efficient algorithm that matches the optimal sample complexity. Our algorithm is robust to model errors and outperforms the existing algorithms in terms of prediction performance on both synthetic and real data.}\n}", "pdf": "http://proceedings.mlr.press/v139/jo21a/jo21a.pdf", "supp": "", "pdf_size": 678283, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10068942929867829536&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Mathematics, University of Wisconsin-Madison, Madison, Wisconsin, USA+Department of Electrical and Computer Engineering, University of Wisconsin-Madison, Madison, Wisconsin, USA; Department of Electrical and Computer Engineering, University of Wisconsin-Madison, Madison, Wisconsin, USA", "aff_domain": "wisc.edu;wisc.edu", "email": "wisc.edu;wisc.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/jo21a.html", "aff_unique_index": "0+0;0", "aff_unique_norm": "University of Wisconsin-Madison", "aff_unique_dep": "Department of Mathematics", "aff_unique_url": "https://www.wisc.edu", "aff_unique_abbr": "UW-Madison", "aff_campus_unique_index": "0+0;0", "aff_campus_unique": "Madison", "aff_country_unique_index": "0+0;0", "aff_country_unique": "United States" }, { "title": "Discretization Drift in Two-Player Games", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10315", "id": "10315", "proceeding": "http://proceedings.mlr.press/v139/rosca21a.html", "slides": "/media/icml-2021/Slides/10315.pdf", "author_site": "Mihaela Rosca, Yan Wu, Benoit Dherin, David GT Barrett", "author": "Mihaela C Rosca; Yan Wu; Benoit Dherin; David Barrett", "abstract": "Gradient-based methods for two-player games produce rich dynamics that can solve challenging problems, yet can be difficult to stabilize and understand. 
Part of this complexity originates from the discrete update steps given by simultaneous or alternating gradient descent, which causes each player to drift away from the continuous gradient flow \u2013 a phenomenon we call discretization drift. Using backward error analysis, we derive modified continuous dynamical systems that closely follow the discrete dynamics. These modified dynamics provide an insight into the notorious challenges associated with zero-sum games, including Generative Adversarial Networks. In particular, we identify distinct components of the discretization drift that can alter performance and in some cases destabilize the game. Finally, quantifying discretization drift allows us to identify regularizers that explicitly cancel harmful forms of drift or strengthen beneficial forms of drift, and thus improve performance of GAN training.", "bibtex": "@InProceedings{pmlr-v139-rosca21a,\n title = \t {Discretization Drift in Two-Player Games},\n author = {Rosca, Mihaela C and Wu, Yan and Dherin, Benoit and Barrett, David},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9064--9074},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/rosca21a/rosca21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/rosca21a.html},\n abstract = \t {Gradient-based methods for two-player games produce rich dynamics that can solve challenging problems, yet can be difficult to stabilize and understand. Part of this complexity originates from the discrete update steps given by simultaneous or alternating gradient descent, which causes each player to drift away from the continuous gradient flow \u2013 a phenomenon we call discretization drift. Using backward error analysis, we derive modified continuous dynamical systems that closely follow the discrete dynamics. These modified dynamics provide an insight into the notorious challenges associated with zero-sum games, including Generative Adversarial Networks. In particular, we identify distinct components of the discretization drift that can alter performance and in some cases destabilize the game. 
Finally, quantifying discretization drift allows us to identify regularizers that explicitly cancel harmful forms of drift or strengthen beneficial forms of drift, and thus improve performance of GAN training.}\n}", "pdf": "http://proceedings.mlr.press/v139/rosca21a/rosca21a.pdf", "supp": "", "pdf_size": 3497585, "gs_citation": 19, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5098459478601130257&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "DeepMind, London, UK + Center for Artificial Intelligence, University College London; DeepMind, London, UK; Google, Dublin, Ireland; DeepMind, London, UK", "aff_domain": "deepmind.com; ; ; ", "email": "deepmind.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/rosca21a.html", "aff_unique_index": "0+1;0;2;0", "aff_unique_norm": "DeepMind;University College London;Google", "aff_unique_dep": ";Center for Artificial Intelligence;Google", "aff_unique_url": "https://deepmind.com;https://www.ucl.ac.uk;https://www.google.com", "aff_unique_abbr": "DeepMind;UCL;Google", "aff_campus_unique_index": "0+0;0;1;0", "aff_campus_unique": "London;Dublin", "aff_country_unique_index": "0+0;0;1;0", "aff_country_unique": "United Kingdom;Ireland" }, { "title": "Discriminative Complementary-Label Learning with Weighted Loss", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10739", "id": "10739", "proceeding": "http://proceedings.mlr.press/v139/gao21d.html", "slides": "/media/icml-2021/Slides/10739.pdf", "author_site": "Yi Gao, Min-Ling Zhang", "author": "Yi Gao; Min-Ling Zhang", "abstract": "Complementary-label learning (CLL) deals with the weak supervision scenario where each training instance is associated with one \\emph{complementary} label, which specifies the class label that the instance does \\emph{not} belong to. Given the training instance ${\\bm x}$, existing CLL approaches aim at modeling the \\emph{generative} relationship between the complementary label $\\bar y$, i.e. $P(\\bar y\\mid {\\bm x})$, and the ground-truth label $y$, i.e. $P(y\\mid {\\bm x})$. Nonetheless, as the ground-truth label is not directly accessible for complementarily labeled training instance, strong generative assumptions may not hold for real-world CLL tasks. In this paper, we derive a simple and theoretically-sound \\emph{discriminative} model towards $P(\\bar y\\mid {\\bm x})$, which naturally leads to a risk estimator with estimation error bound at $\\mathcal{O}(1/\\sqrt{n})$ convergence rate. Accordingly, a practical CLL approach is proposed by further introducing weighted loss to the empirical risk to maximize the predictive gap between potential ground-truth label and complementary label. 
Extensive experiments clearly validate the effectiveness of the proposed discriminative complementary-label learning approach.", "bibtex": "@InProceedings{pmlr-v139-gao21d,\n title = \t {Discriminative Complementary-Label Learning with Weighted Loss},\n author = {Gao, Yi and Zhang, Min-Ling},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3587--3597},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gao21d/gao21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/gao21d.html},\n abstract = \t {Complementary-label learning (CLL) deals with the weak supervision scenario where each training instance is associated with one \\emph{complementary} label, which specifies the class label that the instance does \\emph{not} belong to. Given the training instance ${\\bm x}$, existing CLL approaches aim at modeling the \\emph{generative} relationship between the complementary label $\\bar y$, i.e. $P(\\bar y\\mid {\\bm x})$, and the ground-truth label $y$, i.e. $P(y\\mid {\\bm x})$. Nonetheless, as the ground-truth label is not directly accessible for complementarily labeled training instance, strong generative assumptions may not hold for real-world CLL tasks. In this paper, we derive a simple and theoretically-sound \\emph{discriminative} model towards $P(\\bar y\\mid {\\bm x})$, which naturally leads to a risk estimator with estimation error bound at $\\mathcal{O}(1/\\sqrt{n})$ convergence rate. Accordingly, a practical CLL approach is proposed by further introducing weighted loss to the empirical risk to maximize the predictive gap between potential ground-truth label and complementary label. 
Extensive experiments clearly validate the effectiveness of the proposed discriminative complementary-label learning approach.}\n}", "pdf": "http://proceedings.mlr.press/v139/gao21d/gao21d.pdf", "supp": "", "pdf_size": 1739369, "gs_citation": 50, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=327432517751328713&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "School of Cyber Science and Engineering, Southeast University, Nanjing 210096, China+Key Laboratory of Computer Network and Information Integration (Southeast University), Ministry of Education, China; School of Computer Science and Engineering, Southeast University, Nanjing 210096, China", "aff_domain": "seu.edu.cn; ", "email": "seu.edu.cn; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/gao21d.html", "aff_unique_index": "0+0;0", "aff_unique_norm": "Southeast University", "aff_unique_dep": "School of Cyber Science and Engineering", "aff_unique_url": "https://www.seu.edu.cn/", "aff_unique_abbr": "SEU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Nanjing;", "aff_country_unique_index": "0+0;0", "aff_country_unique": "China" }, { "title": "Disentangling Sampling and Labeling Bias for Learning in Large-output Spaces", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10435", "id": "10435", "proceeding": "http://proceedings.mlr.press/v139/rawat21a.html", "slides": "", "author_site": "Ankit Singh Rawat, Aditya Menon, Wittawat Jitkrittum, Sadeep Jayasumana, Felix Xinnan Yu, Sashank Jakkam Reddi, Sanjiv Kumar", "author": "Ankit Singh Rawat; Aditya K Menon; Wittawat Jitkrittum; Sadeep Jayasumana; Felix Yu; Sashank Reddi; Sanjiv Kumar", "abstract": "Negative sampling schemes enable efficient training given a large number of classes, by offering a means to approximate a computationally expensive loss function that takes all labels into account. In this paper, we present a new connection between these schemes and loss modification techniques for countering label imbalance. We show that different negative sampling schemes implicitly trade-off performance on dominant versus rare labels. Further, we provide a unified means to explicitly tackle both sampling bias, arising from working with a subset of all labels, and labeling bias, which is inherent to the data due to label imbalance. We empirically verify our findings on long-tail classification and retrieval benchmarks.", "bibtex": "@InProceedings{pmlr-v139-rawat21a,\n title = \t {Disentangling Sampling and Labeling Bias for Learning in Large-output Spaces},\n author = {Rawat, Ankit Singh and Menon, Aditya K and Jitkrittum, Wittawat and Jayasumana, Sadeep and Yu, Felix and Reddi, Sashank and Kumar, Sanjiv},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8890--8901},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/rawat21a/rawat21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/rawat21a.html},\n abstract = \t {Negative sampling schemes enable efficient training given a large number of classes, by offering a means to approximate a computationally expensive loss function that takes all labels into account. In this paper, we present a new connection between these schemes and loss modification techniques for countering label imbalance. 
We show that different negative sampling schemes implicitly trade-off performance on dominant versus rare labels. Further, we provide a unified means to explicitly tackle both sampling bias, arising from working with a subset of all labels, and labeling bias, which is inherent to the data due to label imbalance. We empirically verify our findings on long-tail classification and retrieval benchmarks.}\n}", "pdf": "http://proceedings.mlr.press/v139/rawat21a/rawat21a.pdf", "supp": "", "pdf_size": 764133, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14994320499888865478&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "Google Research, New York, USA; Google Research, New York, USA; Google Research, New York, USA; Google Research, New York, USA; Google Research, New York, USA; Google Research, New York, USA; Google Research, New York, USA", "aff_domain": "google.com;google.com; ; ; ; ; ", "email": "google.com;google.com; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/rawat21a.html", "aff_unique_index": "0;0;0;0;0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google Research", "aff_campus_unique_index": "0;0;0;0;0;0;0", "aff_campus_unique": "New York", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Disentangling syntax and semantics in the brain with deep networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9271", "id": "9271", "proceeding": "http://proceedings.mlr.press/v139/caucheteux21a.html", "slides": "/media/icml-2021/Slides/9271_rcXwrEs.pdf", "author_site": "Charlotte Caucheteux, Alexandre Gramfort, Jean-Remi King", "author": "Charlotte Caucheteux; Alexandre Gramfort; Jean-Remi King", "abstract": "The activations of language transformers like GPT-2 have been shown to linearly map onto brain activity during speech comprehension. However, the nature of these activations remains largely unknown and presumably conflate distinct linguistic classes. Here, we propose a taxonomy to factorize the high-dimensional activations of language models into four combinatorial classes: lexical, compositional, syntactic, and semantic representations. We then introduce a statistical method to decompose, through the lens of GPT-2\u2019s activations, the brain activity of 345 subjects recorded with functional magnetic resonance imaging (fMRI) during the listening of \u00a04.6 hours of narrated text. The results highlight two findings. First, compositional representations recruit a more widespread cortical network than lexical ones, and encompass the bilateral temporal, parietal and prefrontal cortices. Second, contrary to previous claims, syntax and semantics are not associated with separated modules, but, instead, appear to share a common and distributed neural substrate. 
Overall, this study introduces a versatile framework to isolate, in the brain activity, the distributed representations of linguistic constructs.", "bibtex": "@InProceedings{pmlr-v139-caucheteux21a,\n title = \t {Disentangling syntax and semantics in the brain with deep networks},\n author = {Caucheteux, Charlotte and Gramfort, Alexandre and King, Jean-Remi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1336--1348},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/caucheteux21a/caucheteux21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/caucheteux21a.html},\n abstract = \t {The activations of language transformers like GPT-2 have been shown to linearly map onto brain activity during speech comprehension. However, the nature of these activations remains largely unknown and presumably conflate distinct linguistic classes. Here, we propose a taxonomy to factorize the high-dimensional activations of language models into four combinatorial classes: lexical, compositional, syntactic, and semantic representations. We then introduce a statistical method to decompose, through the lens of GPT-2\u2019s activations, the brain activity of 345 subjects recorded with functional magnetic resonance imaging (fMRI) during the listening of \u00a04.6 hours of narrated text. The results highlight two findings. First, compositional representations recruit a more widespread cortical network than lexical ones, and encompass the bilateral temporal, parietal and prefrontal cortices. Second, contrary to previous claims, syntax and semantics are not associated with separated modules, but, instead, appear to share a common and distributed neural substrate. 
Overall, this study introduces a versatile framework to isolate, in the brain activity, the distributed representations of linguistic constructs.}\n}", "pdf": "http://proceedings.mlr.press/v139/caucheteux21a/caucheteux21a.pdf", "supp": "", "pdf_size": 3988336, "gs_citation": 100, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11820308376064624637&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Inria, Saclay, France+Facebook AI Research, Paris, France; Inria, Saclay, France; Facebook AI Research, Paris, France+\u00c9cole normale sup\u00e9rieure, PSL University, CNRS, Paris, France", "aff_domain": "fb.com; ; ", "email": "fb.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/caucheteux21a.html", "aff_unique_index": "0+1;0;1+2", "aff_unique_norm": "INRIA;Meta;\u00c9cole Normale Sup\u00e9rieure", "aff_unique_dep": ";Facebook AI Research;", "aff_unique_url": "https://www.inria.fr;https://research.facebook.com;https://www.ens.fr", "aff_unique_abbr": "Inria;FAIR;ENS", "aff_campus_unique_index": "0+1;0;1+1", "aff_campus_unique": "Saclay;Paris", "aff_country_unique_index": "0+0;0;0+0", "aff_country_unique": "France" }, { "title": "Dissecting Supervised Contrastive Learning", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9677", "id": "9677", "proceeding": "http://proceedings.mlr.press/v139/graf21a.html", "slides": "/media/icml-2021/Slides/9677.pdf", "author_site": "Florian Graf, Christoph Hofer, Marc Niethammer, Roland Kwitt", "author": "Florian Graf; Christoph Hofer; Marc Niethammer; Roland Kwitt", "abstract": "Minimizing cross-entropy over the softmax scores of a linear map composed with a high-capacity encoder is arguably the most popular choice for training neural networks on supervised learning tasks. However, recent works show that one can directly optimize the encoder instead, to obtain equally (or even more) discriminative representations via a supervised variant of a contrastive objective. In this work, we address the question whether there are fundamental differences in the sought-for representation geometry in the output space of the encoder at minimal loss. Specifically, we prove, under mild assumptions, that both losses attain their minimum once the representations of each class collapse to the vertices of a regular simplex, inscribed in a hypersphere. We provide empirical evidence that this configuration is attained in practice and that reaching a close-to-optimal state typically indicates good generalization performance. Yet, the two losses show remarkably different optimization behavior. The number of iterations required to perfectly fit to data scales superlinearly with the amount of randomly flipped labels for the supervised contrastive loss. 
This is in contrast to the approximately linear scaling previously reported for networks trained with cross-entropy.", "bibtex": "@InProceedings{pmlr-v139-graf21a,\n title = \t {Dissecting Supervised Contrastive Learning},\n author = {Graf, Florian and Hofer, Christoph and Niethammer, Marc and Kwitt, Roland},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3821--3830},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/graf21a/graf21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/graf21a.html},\n abstract = \t {Minimizing cross-entropy over the softmax scores of a linear map composed with a high-capacity encoder is arguably the most popular choice for training neural networks on supervised learning tasks. However, recent works show that one can directly optimize the encoder instead, to obtain equally (or even more) discriminative representations via a supervised variant of a contrastive objective. In this work, we address the question whether there are fundamental differences in the sought-for representation geometry in the output space of the encoder at minimal loss. Specifically, we prove, under mild assumptions, that both losses attain their minimum once the representations of each class collapse to the vertices of a regular simplex, inscribed in a hypersphere. We provide empirical evidence that this configuration is attained in practice and that reaching a close-to-optimal state typically indicates good generalization performance. Yet, the two losses show remarkably different optimization behavior. The number of iterations required to perfectly fit to data scales superlinearly with the amount of randomly flipped labels for the supervised contrastive loss. 
This is in contrast to the approximately linear scaling previously reported for networks trained with cross-entropy.}\n}", "pdf": "http://proceedings.mlr.press/v139/graf21a/graf21a.pdf", "supp": "", "pdf_size": 828132, "gs_citation": 181, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15842603334888826339&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, University of Salzburg, Austria; Department of Computer Science, University of Salzburg, Austria; UNC Chapel Hill; Department of Computer Science, University of Salzburg, Austria", "aff_domain": "sbg.ac.at; ; ; ", "email": "sbg.ac.at; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/graf21a.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "University of Salzburg;University of North Carolina at Chapel Hill", "aff_unique_dep": "Department of Computer Science;", "aff_unique_url": "https://www.uni-salzburg.at;https://www.unc.edu", "aff_unique_abbr": ";UNC", "aff_campus_unique_index": "1", "aff_campus_unique": ";Chapel Hill", "aff_country_unique_index": "0;0;1;0", "aff_country_unique": "Austria;United States" }, { "title": "Distributed Nystr\u00f6m Kernel Learning with Communications", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8483", "id": "8483", "proceeding": "http://proceedings.mlr.press/v139/yin21a.html", "slides": "/media/icml-2021/Slides/8483.pdf", "author_site": "Rong Yin, Weiping Wang, Dan Meng", "author": "Rong Yin; Weiping Wang; Dan Meng", "abstract": "We study the statistical performance for distributed kernel ridge regression with Nystr\u00f6m (DKRR-NY) and with Nystr\u00f6m and iterative solvers (DKRR-NY-PCG) and successfully derive the optimal learning rates, which can improve the ranges of the number of local processors $p$ to the optimal in existing state-of-art bounds. More precisely, our theoretical analysis show that DKRR-NY and DKRR-NY-PCG achieve the same learning rates as the exact KRR requiring essentially $\\mathcal{O}(|D|^{1.5})$ time and $\\mathcal{O}(|D|)$ memory with relaxing the restriction on $p$ in expectation, where $|D|$ is the number of data, which exhibits the average effectiveness of multiple trials. Furthermore, for showing the generalization performance in a single trial, we deduce the learning rates for DKRR-NY and DKRR-NY-PCG in probability. 
Finally, we propose a novel algorithm DKRR-NY-CM based on DKRR-NY, which employs a communication strategy to further improve the learning performance, whose effectiveness of communications is validated in theoretical and experimental analysis.", "bibtex": "@InProceedings{pmlr-v139-yin21a,\n title = \t {Distributed Nystr\u00f6m Kernel Learning with Communications},\n author = {Yin, Rong and Wang, Weiping and Meng, Dan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12019--12028},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yin21a/yin21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yin21a.html},\n abstract = \t {We study the statistical performance for distributed kernel ridge regression with Nystr\u00f6m (DKRR-NY) and with Nystr\u00f6m and iterative solvers (DKRR-NY-PCG) and successfully derive the optimal learning rates, which can improve the ranges of the number of local processors $p$ to the optimal in existing state-of-art bounds. More precisely, our theoretical analysis show that DKRR-NY and DKRR-NY-PCG achieve the same learning rates as the exact KRR requiring essentially $\\mathcal{O}(|D|^{1.5})$ time and $\\mathcal{O}(|D|)$ memory with relaxing the restriction on $p$ in expectation, where $|D|$ is the number of data, which exhibits the average effectiveness of multiple trials. Furthermore, for showing the generalization performance in a single trial, we deduce the learning rates for DKRR-NY and DKRR-NY-PCG in probability. Finally, we propose a novel algorithm DKRR-NY-CM based on DKRR-NY, which employs a communication strategy to further improve the learning performance, whose effectiveness of communications is validated in theoretical and experimental analysis.}\n}", "pdf": "http://proceedings.mlr.press/v139/yin21a/yin21a.pdf", "supp": "", "pdf_size": 332565, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12434168585283326687&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/yin21a.html" }, { "title": "Distributed Second Order Methods with Fast Rates and Compressed Communication", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10191", "id": "10191", "proceeding": "http://proceedings.mlr.press/v139/islamov21a.html", "slides": "", "author_site": "Rustem Islamov, Xun Qian, Peter Richtarik", "author": "Rustem Islamov; Xun Qian; Peter Richtarik", "abstract": "We develop several new communication-efficient second-order methods for distributed optimization. Our first method, NEWTON-STAR, is a variant of Newton\u2019s method from which it inherits its fast local quadratic rate. However, unlike Newton\u2019s method, NEWTON-STAR enjoys the same per iteration communication cost as gradient descent. While this method is impractical as it relies on the use of certain unknown parameters characterizing the Hessian of the objective function at the optimum, it serves as the starting point which enables us to design practical variants thereof with strong theoretical guarantees. In particular, we design a stochastic sparsification strategy for learning the unknown parameters in an iterative fashion in a communication efficient manner. 
Applying this strategy to NEWTON-STAR leads to our next method, NEWTON-LEARN, for which we prove local linear and superlinear rates independent of the condition number. When applicable, this method can have dramatically superior convergence behavior when compared to state-of-the-art methods. Finally, we develop a globalization strategy using cubic regularization which leads to our next method, CUBIC-NEWTON-LEARN, for which we prove global sublinear and linear convergence rates, and a fast superlinear rate. Our results are supported with experimental results on real datasets, and show several orders of magnitude improvement on baseline and state-of-the-art methods in terms of communication complexity.", "bibtex": "@InProceedings{pmlr-v139-islamov21a,\n title = \t {Distributed Second Order Methods with Fast Rates and Compressed Communication},\n author = {Islamov, Rustem and Qian, Xun and Richtarik, Peter},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4617--4628},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/islamov21a/islamov21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/islamov21a.html},\n abstract = \t {We develop several new communication-efficient second-order methods for distributed optimization. Our first method, NEWTON-STAR, is a variant of Newton\u2019s method from which it inherits its fast local quadratic rate. However, unlike Newton\u2019s method, NEWTON-STAR enjoys the same per iteration communication cost as gradient descent. While this method is impractical as it relies on the use of certain unknown parameters characterizing the Hessian of the objective function at the optimum, it serves as the starting point which enables us to design practical variants thereof with strong theoretical guarantees. In particular, we design a stochastic sparsification strategy for learning the unknown parameters in an iterative fashion in a communication efficient manner. Applying this strategy to NEWTON-STAR leads to our next method, NEWTON-LEARN, for which we prove local linear and superlinear rates independent of the condition number. When applicable, this method can have dramatically superior convergence behavior when compared to state-of-the-art methods. Finally, we develop a globalization strategy using cubic regularization which leads to our next method, CUBIC-NEWTON-LEARN, for which we prove global sublinear and linear convergence rates, and a fast superlinear rate. 
Our results are supported with experimental results on real datasets, and show several orders of magnitude improvement on baseline and state-of-the-art methods in terms of communication complexity.}\n}", "pdf": "http://proceedings.mlr.press/v139/islamov21a/islamov21a.pdf", "supp": "", "pdf_size": 4180777, "gs_citation": 67, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2570668261080611641&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "King Abdullah University of Science and Technology, Thuwal, Saudi Arabia+Moscow Institute of Physics and Technology, Dolgoprudny, Russia; King Abdullah University of Science and Technology, Thuwal, Saudi Arabia; King Abdullah University of Science and Technology, Thuwal, Saudi Arabia", "aff_domain": "phystech.edu; ; ", "email": "phystech.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/islamov21a.html", "aff_unique_index": "0+1;0;0", "aff_unique_norm": "King Abdullah University of Science and Technology;Moscow Institute of Physics and Technology", "aff_unique_dep": ";", "aff_unique_url": "https://www.kast.kau.edu.sa;https://www.mipt.ru", "aff_unique_abbr": "KAUST;MIPT", "aff_campus_unique_index": "0+1;0;0", "aff_campus_unique": "Thuwal;Dolgoprudny", "aff_country_unique_index": "0+1;0;0", "aff_country_unique": "Saudi Arabia;Russian Federation" }, { "title": "Distribution-Free Calibration Guarantees for Histogram Binning without Sample Splitting", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10205", "id": "10205", "proceeding": "http://proceedings.mlr.press/v139/gupta21b.html", "slides": "/media/icml-2021/Slides/10205.pdf", "author_site": "Chirag Gupta, Aaditya Ramdas", "author": "Chirag Gupta; Aaditya Ramdas", "abstract": "We prove calibration guarantees for the popular histogram binning (also called uniform-mass binning) method of Zadrozny and Elkan (2001). Histogram binning has displayed strong practical performance, but theoretical guarantees have only been shown for sample split versions that avoid \u2019double dipping\u2019 the data. We demonstrate that the statistical cost of sample splitting is practically significant on a credit default dataset. We then prove calibration guarantees for the original method that double dips the data, using a certain Markov property of order statistics. Based on our results, we make practical recommendations for choosing the number of bins in histogram binning. In our illustrative simulations, we propose a new tool for assessing calibration\u2014validity plots\u2014which provide more information than an ECE estimate.", "bibtex": "@InProceedings{pmlr-v139-gupta21b,\n title = \t {Distribution-Free Calibration Guarantees for Histogram Binning without Sample Splitting},\n author = {Gupta, Chirag and Ramdas, Aaditya},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3942--3952},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gupta21b/gupta21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/gupta21b.html},\n abstract = \t {We prove calibration guarantees for the popular histogram binning (also called uniform-mass binning) method of Zadrozny and Elkan (2001). 
Histogram binning has displayed strong practical performance, but theoretical guarantees have only been shown for sample split versions that avoid \u2019double dipping\u2019 the data. We demonstrate that the statistical cost of sample splitting is practically significant on a credit default dataset. We then prove calibration guarantees for the original method that double dips the data, using a certain Markov property of order statistics. Based on our results, we make practical recommendations for choosing the number of bins in histogram binning. In our illustrative simulations, we propose a new tool for assessing calibration\u2014validity plots\u2014which provide more information than an ECE estimate.}\n}", "pdf": "http://proceedings.mlr.press/v139/gupta21b/gupta21b.pdf", "supp": "", "pdf_size": 910302, "gs_citation": 75, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1595974871643501822&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Carnegie Mellon University; Carnegie Mellon University", "aff_domain": "cmu.edu; ", "email": "cmu.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/gupta21b.html", "aff_unique_index": "0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Distributionally Robust Optimization with Markovian Data", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10259", "id": "10259", "proceeding": "http://proceedings.mlr.press/v139/li21t.html", "slides": "", "author_site": "Mengmeng Li, Tobias Sutter, Daniel Kuhn", "author": "Mengmeng Li; Tobias Sutter; Daniel Kuhn", "abstract": "We study a stochastic program where the probability distribution of the uncertain problem parameters is unknown and only indirectly observed via finitely many correlated samples generated by an unknown Markov chain with $d$ states. We propose a data-driven distributionally robust optimization model to estimate the problem\u2019s objective function and optimal solution. By leveraging results from large deviations theory, we derive statistical guarantees on the quality of these estimators. The underlying worst-case expectation problem is nonconvex and involves $\\mathcal O(d^2)$ decision variables. Thus, it cannot be solved efficiently for large $d$. By exploiting the structure of this problem, we devise a customized Frank-Wolfe algorithm with convex direction-finding subproblems of size $\\mathcal O(d)$. We prove that this algorithm finds a stationary point efficiently under mild conditions. The efficiency of the method is predicated on a dimensionality reduction enabled by a dual reformulation. 
Numerical experiments indicate that our approach has better computational and statistical properties than the state-of-the-art methods.", "bibtex": "@InProceedings{pmlr-v139-li21t,\n title = \t {Distributionally Robust Optimization with Markovian Data},\n author = {Li, Mengmeng and Sutter, Tobias and Kuhn, Daniel},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6493--6503},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21t/li21t.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21t.html},\n abstract = \t {We study a stochastic program where the probability distribution of the uncertain problem parameters is unknown and only indirectly observed via finitely many correlated samples generated by an unknown Markov chain with $d$ states. We propose a data-driven distributionally robust optimization model to estimate the problem\u2019s objective function and optimal solution. By leveraging results from large deviations theory, we derive statistical guarantees on the quality of these estimators. The underlying worst-case expectation problem is nonconvex and involves $\\mathcal O(d^2)$ decision variables. Thus, it cannot be solved efficiently for large $d$. By exploiting the structure of this problem, we devise a customized Frank-Wolfe algorithm with convex direction-finding subproblems of size $\\mathcal O(d)$. We prove that this algorithm finds a stationary point efficiently under mild conditions. The efficiency of the method is predicated on a dimensionality reduction enabled by a dual reformulation. Numerical experiments indicate that our approach has better computational and statistical properties than the state-of-the-art methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21t/li21t.pdf", "supp": "", "pdf_size": 958834, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13967502296963435329&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Risk Analytics and Optimization Chair, \u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne; Risk Analytics and Optimization Chair, \u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne; Risk Analytics and Optimization Chair, \u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne", "aff_domain": "epfl.ch; ; ", "email": "epfl.ch; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/li21t.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "EPFL", "aff_unique_dep": "Risk Analytics and Optimization Chair", "aff_unique_url": "https://www.epfl.ch", "aff_unique_abbr": "EPFL", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Switzerland" }, { "title": "Ditto: Fair and Robust Federated Learning Through Personalization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10571", "id": "10571", "proceeding": "http://proceedings.mlr.press/v139/li21h.html", "slides": "", "author_site": "Tian Li, Shengyuan Hu, Ahmad Beirami, Virginia Smith", "author": "Tian Li; Shengyuan Hu; Ahmad Beirami; Virginia Smith", "abstract": "Fairness and robustness are two important concerns for federated learning systems. 
In this work, we identify that robustness to data and model poisoning attacks and fairness, measured as the uniformity of performance across devices, are competing constraints in statistically heterogeneous networks. To address these constraints, we propose employing a simple, general framework for personalized federated learning, Ditto, that can inherently provide fairness and robustness benefits, and develop a scalable solver for it. Theoretically, we analyze the ability of Ditto to achieve fairness and robustness simultaneously on a class of linear problems. Empirically, across a suite of federated datasets, we show that Ditto not only achieves competitive performance relative to recent personalization methods, but also enables more accurate, robust, and fair models relative to state-of-the-art fair or robust baselines.", "bibtex": "@InProceedings{pmlr-v139-li21h,\n title = \t {Ditto: Fair and Robust Federated Learning Through Personalization},\n author = {Li, Tian and Hu, Shengyuan and Beirami, Ahmad and Smith, Virginia},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6357--6368},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21h/li21h.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21h.html},\n abstract = \t {Fairness and robustness are two important concerns for federated learning systems. In this work, we identify that robustness to data and model poisoning attacks and fairness, measured as the uniformity of performance across devices, are competing constraints in statistically heterogeneous networks. To address these constraints, we propose employing a simple, general framework for personalized federated learning, Ditto, that can inherently provide fairness and robustness benefits, and develop a scalable solver for it. Theoretically, we analyze the ability of Ditto to achieve fairness and robustness simultaneously on a class of linear problems. 
Empirically, across a suite of federated datasets, we show that Ditto not only achieves competitive performance relative to recent personalization methods, but also enables more accurate, robust, and fair models relative to state-of-the-art fair or robust baselines.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21h/li21h.pdf", "supp": "", "pdf_size": 1864324, "gs_citation": 1178, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11515326237813489969&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Carnegie Mellon University; Carnegie Mellon University; Facebook AI; Carnegie Mellon University", "aff_domain": "cmu.edu; ;fb.com;cmu.edu", "email": "cmu.edu; ;fb.com;cmu.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/li21h.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "Carnegie Mellon University;Meta", "aff_unique_dep": ";Facebook AI", "aff_unique_url": "https://www.cmu.edu;https://www.facebook.com", "aff_unique_abbr": "CMU;Facebook AI", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Diversity Actor-Critic: Sample-Aware Entropy Regularization for Sample-Efficient Exploration", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10269", "id": "10269", "proceeding": "http://proceedings.mlr.press/v139/han21a.html", "slides": "/media/icml-2021/Slides/10269.pdf", "author_site": "Seungyul Han, Youngchul Sung", "author": "Seungyul Han; Youngchul Sung", "abstract": "In this paper, sample-aware policy entropy regularization is proposed to enhance the conventional policy entropy regularization for better exploration. Exploiting the sample distribution obtainable from the replay buffer, the proposed sample-aware entropy regularization maximizes the entropy of the weighted sum of the policy action distribution and the sample action distribution from the replay buffer for sample-efficient exploration. A practical algorithm named diversity actor-critic (DAC) is developed by applying policy iteration to the objective function with the proposed sample-aware entropy regularization. Numerical results show that DAC significantly outperforms existing recent algorithms for reinforcement learning.", "bibtex": "@InProceedings{pmlr-v139-han21a,\n title = \t {Diversity Actor-Critic: Sample-Aware Entropy Regularization for Sample-Efficient Exploration},\n author = {Han, Seungyul and Sung, Youngchul},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4018--4029},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/han21a/han21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/han21a.html},\n abstract = \t {In this paper, sample-aware policy entropy regularization is proposed to enhance the conventional policy entropy regularization for better exploration. Exploiting the sample distribution obtainable from the replay buffer, the proposed sample-aware entropy regularization maximizes the entropy of the weighted sum of the policy action distribution and the sample action distribution from the replay buffer for sample-efficient exploration. 
A practical algorithm named diversity actor-critic (DAC) is developed by applying policy iteration to the objective function with the proposed sample-aware entropy regularization. Numerical results show that DAC significantly outperforms existing recent algorithms for reinforcement learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/han21a/han21a.pdf", "supp": "", "pdf_size": 3829696, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1891726031922597340&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Electrical Engineering, Korea Advanced Institute of Science and Technology, Daejeon, South Korea; Department of Electrical Engineering, Korea Advanced Institute of Science and Technology, Daejeon, South Korea", "aff_domain": "kaist.ac.kr;kaist.ac.kr", "email": "kaist.ac.kr;kaist.ac.kr", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/han21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Korea Advanced Institute of Science and Technology", "aff_unique_dep": "Department of Electrical Engineering", "aff_unique_url": "https://www.kaist.ac.kr", "aff_unique_abbr": "KAIST", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Daejeon", "aff_country_unique_index": "0;0", "aff_country_unique": "South Korea" }, { "title": "Do We Actually Need Dense Over-Parameterization? In-Time Over-Parameterization in Sparse Training", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10199", "id": "10199", "proceeding": "http://proceedings.mlr.press/v139/liu21y.html", "slides": "", "author_site": "Shiwei Liu, Lu Yin, Decebal Mocanu, Mykola Pechenizkiy", "author": "Shiwei Liu; Lu Yin; Decebal Constantin Mocanu; Mykola Pechenizkiy", "abstract": "In this paper, we introduce a new perspective on training deep neural networks capable of state-of-the-art performance without the need for the expensive over-parameterization by proposing the concept of In-Time Over-Parameterization (ITOP) in sparse training. By starting from a random sparse network and continuously exploring sparse connectivities during training, we can perform an Over-Parameterization over the course of training, closing the gap in the expressibility between sparse training and dense training. We further use ITOP to understand the underlying mechanism of Dynamic Sparse Training (DST) and discover that the benefits of DST come from its ability to consider across time all possible parameters when searching for the optimal sparse connectivity. As long as sufficient parameters have been reliably explored, DST can outperform the dense neural network by a large margin. We present a series of experiments to support our conjecture and achieve the state-of-the-art sparse training performance with ResNet-50 on ImageNet. More impressively, ITOP achieves dominant performance over the overparameterization-based sparse methods at extreme sparsities. When trained with ResNet-34 on CIFAR-100, ITOP can match the performance of the dense model at an extreme sparsity 98%.", "bibtex": "@InProceedings{pmlr-v139-liu21y,\n title = \t {Do We Actually Need Dense Over-Parameterization? 
In-Time Over-Parameterization in Sparse Training},\n author = {Liu, Shiwei and Yin, Lu and Mocanu, Decebal Constantin and Pechenizkiy, Mykola},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6989--7000},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21y/liu21y.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21y.html},\n abstract = \t {In this paper, we introduce a new perspective on training deep neural networks capable of state-of-the-art performance without the need for the expensive over-parameterization by proposing the concept of In-Time Over-Parameterization (ITOP) in sparse training. By starting from a random sparse network and continuously exploring sparse connectivities during training, we can perform an Over-Parameterization over the course of training, closing the gap in the expressibility between sparse training and dense training. We further use ITOP to understand the underlying mechanism of Dynamic Sparse Training (DST) and discover that the benefits of DST come from its ability to consider across time all possible parameters when searching for the optimal sparse connectivity. As long as sufficient parameters have been reliably explored, DST can outperform the dense neural network by a large margin. We present a series of experiments to support our conjecture and achieve the state-of-the-art sparse training performance with ResNet-50 on ImageNet. More impressively, ITOP achieves dominant performance over the overparameterization-based sparse methods at extreme sparsities. When trained with ResNet-34 on CIFAR-100, ITOP can match the performance of the dense model at an extreme sparsity 98%.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21y/liu21y.pdf", "supp": "", "pdf_size": 5921386, "gs_citation": 152, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17950677328551432354&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Department of Mathematics and Computer Science, Eindhoven University of Technology; Department of Mathematics and Computer Science, Eindhoven University of Technology; Department of Mathematics and Computer Science, Eindhoven University of Technology + Faculty of Electrical Engineering, Mathematics and Computer Science, University of Twente; Department of Mathematics and Computer Science, Eindhoven University of Technology", "aff_domain": "tue.nl; ; ; ", "email": "tue.nl; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/liu21y.html", "aff_unique_index": "0;0;0+1;0", "aff_unique_norm": "Eindhoven University of Technology;University of Twente", "aff_unique_dep": "Department of Mathematics and Computer Science;Faculty of Electrical Engineering, Mathematics and Computer Science", "aff_unique_url": "https://www.tue.nl;https://www.utwente.nl", "aff_unique_abbr": "TU/e;UT", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Eindhoven;", "aff_country_unique_index": "0;0;0+0;0", "aff_country_unique": "Netherlands" }, { "title": "Domain Generalization using Causal Matching", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8585", "id": "8585", "proceeding": "http://proceedings.mlr.press/v139/mahajan21b.html", "slides": "/media/icml-2021/Slides/8585.pdf", "author_site": "Divyat Mahajan, Shruti 
Tople, Amit Sharma", "author": "Divyat Mahajan; Shruti Tople; Amit Sharma", "abstract": "In the domain generalization literature, a common objective is to learn representations independent of the domain after conditioning on the class label. We show that this objective is not sufficient: there exist counter-examples where a model fails to generalize to unseen domains even after satisfying class-conditional domain invariance. We formalize this observation through a structural causal model and show the importance of modeling within-class variations for generalization. Specifically, classes contain objects that characterize specific causal features, and domains can be interpreted as interventions on these objects that change non-causal features. We highlight an alternative condition: inputs across domains should have the same representation if they are derived from the same object. Based on this objective, we propose matching-based algorithms when base objects are observed (e.g., through data augmentation) and approximate the objective when objects are not observed (MatchDG). Our simple matching-based algorithms are competitive to prior work on out-of-domain accuracy for rotated MNIST, Fashion-MNIST, PACS, and Chest-Xray datasets. Our method MatchDG also recovers ground-truth object matches: on MNIST and Fashion-MNIST, top-10 matches from MatchDG have over 50% overlap with ground-truth matches.", "bibtex": "@InProceedings{pmlr-v139-mahajan21b,\n title = \t {Domain Generalization using Causal Matching},\n author = {Mahajan, Divyat and Tople, Shruti and Sharma, Amit},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7313--7324},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/mahajan21b/mahajan21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/mahajan21b.html},\n abstract = \t {In the domain generalization literature, a common objective is to learn representations independent of the domain after conditioning on the class label. We show that this objective is not sufficient: there exist counter-examples where a model fails to generalize to unseen domains even after satisfying class-conditional domain invariance. We formalize this observation through a structural causal model and show the importance of modeling within-class variations for generalization. Specifically, classes contain objects that characterize specific causal features, and domains can be interpreted as interventions on these objects that change non-causal features. We highlight an alternative condition: inputs across domains should have the same representation if they are derived from the same object. Based on this objective, we propose matching-based algorithms when base objects are observed (e.g., through data augmentation) and approximate the objective when objects are not observed (MatchDG). Our simple matching-based algorithms are competitive to prior work on out-of-domain accuracy for rotated MNIST, Fashion-MNIST, PACS, and Chest-Xray datasets. 
Our method MatchDG also recovers ground-truth object matches: on MNIST and Fashion-MNIST, top-10 matches from MatchDG have over 50% overlap with ground-truth matches.}\n}", "pdf": "http://proceedings.mlr.press/v139/mahajan21b/mahajan21b.pdf", "supp": "", "pdf_size": 2032058, "gs_citation": 377, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7680827305765663856&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 7, "aff": "Microsoft Research, India; Microsoft Research, UK; Microsoft Research, India", "aff_domain": "gmail.com; ; ", "email": "gmail.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/mahajan21b.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Microsoft", "aff_unique_dep": "Microsoft Research", "aff_unique_url": "https://www.microsoft.com/en-us/research/group/india.aspx", "aff_unique_abbr": "MSR India", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0", "aff_country_unique": "India;United Kingdom" }, { "title": "Don\u2019t Just Blame Over-parametrization for Over-confidence: Theoretical Analysis of Calibration in Binary Classification", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8711", "id": "8711", "proceeding": "http://proceedings.mlr.press/v139/bai21c.html", "slides": "", "author_site": "Yu Bai, Song Mei, Huan Wang, Caiming Xiong", "author": "Yu Bai; Song Mei; Huan Wang; Caiming Xiong", "abstract": "Modern machine learning models with high accuracy are often miscalibrated\u2014the predicted top probability does not reflect the actual accuracy, and tends to be \\emph{over-confident}. It is commonly believed that such over-confidence is mainly due to \\emph{over-parametrization}, in particular when the model is large enough to memorize the training data and maximize the confidence. In this paper, we show theoretically that over-parametrization is not the only reason for over-confidence. We prove that \\emph{logistic regression is inherently over-confident}, in the realizable, under-parametrized setting where the data is generated from the logistic model, and the sample size is much larger than the number of parameters. Further, this over-confidence happens for general well-specified binary classification problems as long as the activation is symmetric and concave on the positive part. Perhaps surprisingly, we also show that over-confidence is not always the case\u2014there exists another activation function (and a suitable loss function) under which the learned classifier is \\emph{under-confident} at some probability values. 
Overall, our theory provides a precise characterization of calibration in realizable binary classification, which we verify on simulations and real data experiments.", "bibtex": "@InProceedings{pmlr-v139-bai21c,\n title = \t {Don\u2019t Just Blame Over-parametrization for Over-confidence: Theoretical Analysis of Calibration in Binary Classification},\n author = {Bai, Yu and Mei, Song and Wang, Huan and Xiong, Caiming},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {566--576},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bai21c/bai21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/bai21c.html},\n abstract = \t {Modern machine learning models with high accuracy are often miscalibrated\u2014the predicted top probability does not reflect the actual accuracy, and tends to be \\emph{over-confident}. It is commonly believed that such over-confidence is mainly due to \\emph{over-parametrization}, in particular when the model is large enough to memorize the training data and maximize the confidence. In this paper, we show theoretically that over-parametrization is not the only reason for over-confidence. We prove that \\emph{logistic regression is inherently over-confident}, in the realizable, under-parametrized setting where the data is generated from the logistic model, and the sample size is much larger than the number of parameters. Further, this over-confidence happens for general well-specified binary classification problems as long as the activation is symmetric and concave on the positive part. Perhaps surprisingly, we also show that over-confidence is not always the case\u2014there exists another activation function (and a suitable loss function) under which the learned classifier is \\emph{under-confident} at some probability values. 
Overall, our theory provides a precise characterization of calibration in realizable binary classification, which we verify on simulations and real data experiments.}\n}", "pdf": "http://proceedings.mlr.press/v139/bai21c/bai21c.pdf", "supp": "", "pdf_size": 5624404, "gs_citation": 64, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9583279594465131433&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Salesforce Research; University of California, Berkeley; Salesforce Research; Salesforce Research", "aff_domain": "salesforce.com;berkeley.edu; ; ", "email": "salesforce.com;berkeley.edu; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/bai21c.html", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "Salesforce;University of California, Berkeley", "aff_unique_dep": "Salesforce Research;", "aff_unique_url": "https://research.salesforce.com;https://www.berkeley.edu", "aff_unique_abbr": "Salesforce;UC Berkeley", "aff_campus_unique_index": "1", "aff_campus_unique": ";Berkeley", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "DouZero: Mastering DouDizhu with Self-Play Deep Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8445", "id": "8445", "proceeding": "http://proceedings.mlr.press/v139/zha21a.html", "slides": "/media/icml-2021/Slides/8445.pdf", "author_site": "Daochen Zha, Jingru Xie, Wenye Ma, Sheng Zhang, Xiangru Lian, Xia Hu, Ji Liu", "author": "Daochen Zha; Jingru Xie; Wenye Ma; Sheng Zhang; Xiangru Lian; Xia Hu; Ji Liu", "abstract": "Games are abstractions of the real world, where artificial agents learn to compete and cooperate with other agents. While significant achievements have been made in various perfect- and imperfect-information games, DouDizhu (a.k.a. Fighting the Landlord), a three-player card game, is still unsolved. DouDizhu is a very challenging domain with competition, collaboration, imperfect information, large state space, and particularly a massive set of possible actions where the legal actions vary significantly from turn to turn. Unfortunately, modern reinforcement learning algorithms mainly focus on simple and small action spaces, and not surprisingly, are shown not to make satisfactory progress in DouDizhu. In this work, we propose a conceptually simple yet effective DouDizhu AI system, namely DouZero, which enhances traditional Monte-Carlo methods with deep neural networks, action encoding, and parallel actors. Starting from scratch in a single server with four GPUs, DouZero outperformed all the existing DouDizhu AI programs in days of training and was ranked the first in the Botzone leaderboard among 344 AI agents. Through building DouZero, we show that classic Monte-Carlo methods can be made to deliver strong results in a hard domain with a complex action space. 
The code and an online demo are released at https://github.com/kwai/DouZero with the hope that this insight could motivate future work.", "bibtex": "@InProceedings{pmlr-v139-zha21a,\n title = \t {DouZero: Mastering DouDizhu with Self-Play Deep Reinforcement Learning},\n author = {Zha, Daochen and Xie, Jingru and Ma, Wenye and Zhang, Sheng and Lian, Xiangru and Hu, Xia and Liu, Ji},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12333--12344},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zha21a/zha21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zha21a.html},\n abstract = \t {Games are abstractions of the real world, where artificial agents learn to compete and cooperate with other agents. While significant achievements have been made in various perfect- and imperfect-information games, DouDizhu (a.k.a. Fighting the Landlord), a three-player card game, is still unsolved. DouDizhu is a very challenging domain with competition, collaboration, imperfect information, large state space, and particularly a massive set of possible actions where the legal actions vary significantly from turn to turn. Unfortunately, modern reinforcement learning algorithms mainly focus on simple and small action spaces, and not surprisingly, are shown not to make satisfactory progress in DouDizhu. In this work, we propose a conceptually simple yet effective DouDizhu AI system, namely DouZero, which enhances traditional Monte-Carlo methods with deep neural networks, action encoding, and parallel actors. Starting from scratch in a single server with four GPUs, DouZero outperformed all the existing DouDizhu AI programs in days of training and was ranked the first in the Botzone leaderboard among 344 AI agents. Through building DouZero, we show that classic Monte-Carlo methods can be made to deliver strong results in a hard domain with a complex action space. 
The code and an online demo are released at https://github.com/kwai/DouZero with the hope that this insight could motivate future work.}\n}", "pdf": "http://proceedings.mlr.press/v139/zha21a/zha21a.pdf", "supp": "", "pdf_size": 1191368, "gs_citation": 167, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10717987879996790788&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science and Engineering, Texas A&M University; AI Platform, Kwai Inc.; Georgia Institute of Technology; AI Platform, Kwai Inc.; AI Platform, Kwai Inc.; Department of Computer Science and Engineering, Texas A&M University; AI Platform, Kwai Inc.", "aff_domain": "tamu.edu; ; ; ; ; ; ", "email": "tamu.edu; ; ; ; ; ; ", "github": "https://github.com/kwai/DouZero", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/zha21a.html", "aff_unique_index": "0;1;2;1;1;0;1", "aff_unique_norm": "Texas A&M University;Kwai Inc.;Georgia Institute of Technology", "aff_unique_dep": "Department of Computer Science and Engineering;AI Platform;", "aff_unique_url": "https://www.tamu.edu;https://www.kwai.com;https://www.gatech.edu", "aff_unique_abbr": "TAMU;Kwai;Georgia Tech", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0;1;1;0;1", "aff_country_unique": "United States;China" }, { "title": "Double-Win Quant: Aggressively Winning Robustness of Quantized Deep Neural Networks via Random Precision Training and Inference", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9959", "id": "9959", "proceeding": "http://proceedings.mlr.press/v139/fu21c.html", "slides": "", "author_site": "Yonggan Fu, Qixuan Yu, Meng Li, Vikas Chandra, Yingyan Lin", "author": "Yonggan Fu; Qixuan Yu; Meng Li; Vikas Chandra; Yingyan Lin", "abstract": "Quantization is promising in enabling powerful yet complex deep neural networks (DNNs) to be deployed into resource constrained platforms. However, quantized DNNs are vulnerable to adversarial attacks unless being equipped with sophisticated techniques, leading to a dilemma of struggling between DNNs\u2019 efficiency and robustness. In this work, we demonstrate a new perspective regarding quantization\u2019s role in DNNs\u2019 robustness, advocating that quantization can be leveraged to largely boost DNNs\u2019 robustness, and propose a framework dubbed Double-Win Quant that can boost the robustness of quantized DNNs over their full precision counterparts by a large margin. Specifically, we for the first time identify that when an adversarially trained model is quantized to different precisions in a post-training manner, the associated adversarial attacks transfer poorly between different precisions. Leveraging this intriguing observation, we further develop Double-Win Quant integrating random precision inference and training to further reduce and utilize the poor adversarial transferability, enabling an aggressive \u201cwin-win\" in terms of DNNs\u2019 robustness and efficiency. Extensive experiments and ablation studies consistently validate Double-Win Quant\u2019s effectiveness and advantages over state-of-the-art (SOTA) adversarial training methods across various attacks/models/datasets. 
Our codes are available at: https://github.com/RICE-EIC/Double-Win-Quant.", "bibtex": "@InProceedings{pmlr-v139-fu21c,\n title = \t {Double-Win Quant: Aggressively Winning Robustness of Quantized Deep Neural Networks via Random Precision Training and Inference},\n author = {Fu, Yonggan and Yu, Qixuan and Li, Meng and Chandra, Vikas and Lin, Yingyan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3492--3504},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fu21c/fu21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/fu21c.html},\n abstract = \t {Quantization is promising in enabling powerful yet complex deep neural networks (DNNs) to be deployed into resource constrained platforms. However, quantized DNNs are vulnerable to adversarial attacks unless being equipped with sophisticated techniques, leading to a dilemma of struggling between DNNs\u2019 efficiency and robustness. In this work, we demonstrate a new perspective regarding quantization\u2019s role in DNNs\u2019 robustness, advocating that quantization can be leveraged to largely boost DNNs\u2019 robustness, and propose a framework dubbed Double-Win Quant that can boost the robustness of quantized DNNs over their full precision counterparts by a large margin. Specifically, we for the first time identify that when an adversarially trained model is quantized to different precisions in a post-training manner, the associated adversarial attacks transfer poorly between different precisions. Leveraging this intriguing observation, we further develop Double-Win Quant integrating random precision inference and training to further reduce and utilize the poor adversarial transferability, enabling an aggressive \u201cwin-win\" in terms of DNNs\u2019 robustness and efficiency. Extensive experiments and ablation studies consistently validate Double-Win Quant\u2019s effectiveness and advantages over state-of-the-art (SOTA) adversarial training methods across various attacks/models/datasets. 
Our codes are available at: https://github.com/RICE-EIC/Double-Win-Quant.}\n}", "pdf": "http://proceedings.mlr.press/v139/fu21c/fu21c.pdf", "supp": "", "pdf_size": 649401, "gs_citation": 36, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4090620012941677396&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "Department of Electrical and Computer Engineering, Rice University; Department of Electrical and Computer Engineering, Rice University; Facebook Inc.; Facebook Inc.; Department of Electrical and Computer Engineering, Rice University", "aff_domain": "rice.edu; ; ; ;rice.edu", "email": "rice.edu; ; ; ;rice.edu", "github": "https://github.com/RICE-EIC/Double-Win-Quant", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/fu21c.html", "aff_unique_index": "0;0;1;1;0", "aff_unique_norm": "Rice University;Meta", "aff_unique_dep": "Department of Electrical and Computer Engineering;Facebook", "aff_unique_url": "https://www.rice.edu;https://www.facebook.com", "aff_unique_abbr": "Rice;FB", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Doubly Robust Off-Policy Actor-Critic: Convergence and Optimality", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8701", "id": "8701", "proceeding": "http://proceedings.mlr.press/v139/xu21j.html", "slides": "", "author_site": "Tengyu Xu, Zhuoran Yang, Zhaoran Wang, Yingbin LIANG", "author": "Tengyu Xu; Zhuoran Yang; Zhaoran Wang; Yingbin Liang", "abstract": "Designing off-policy reinforcement learning algorithms is typically a very challenging task, because a desirable iteration update often involves an expectation over an on-policy distribution. Prior off-policy actor-critic (AC) algorithms have introduced a new critic that uses the density ratio for adjusting the distribution mismatch in order to stabilize the convergence, but at the cost of potentially introducing high biases due to the estimation errors of both the density ratio and value function. In this paper, we develop a doubly robust off-policy AC (DR-Off-PAC) for discounted MDP, which can take advantage of learned nuisance functions to reduce estimation errors. Moreover, DR-Off-PAC adopts a single timescale structure, in which both actor and critics are updated simultaneously with constant stepsize, and is thus more sample efficient than prior algorithms that adopt either two timescale or nested-loop structure. We study the finite-time convergence rate and characterize the sample complexity for DR-Off-PAC to attain an $\\epsilon$-accurate optimal policy. We also show that the overall convergence of DR-Off-PAC is doubly robust to the approximation errors that depend only on the expressive power of approximation functions. 
To the best of our knowledge, our study establishes the first overall sample complexity analysis for single time-scale off-policy AC algorithm.", "bibtex": "@InProceedings{pmlr-v139-xu21j,\n title = \t {Doubly Robust Off-Policy Actor-Critic: Convergence and Optimality},\n author = {Xu, Tengyu and Yang, Zhuoran and Wang, Zhaoran and Liang, Yingbin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11581--11591},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xu21j/xu21j.pdf},\n url = \t {https://proceedings.mlr.press/v139/xu21j.html},\n abstract = \t {Designing off-policy reinforcement learning algorithms is typically a very challenging task, because a desirable iteration update often involves an expectation over an on-policy distribution. Prior off-policy actor-critic (AC) algorithms have introduced a new critic that uses the density ratio for adjusting the distribution mismatch in order to stabilize the convergence, but at the cost of potentially introducing high biases due to the estimation errors of both the density ratio and value function. In this paper, we develop a doubly robust off-policy AC (DR-Off-PAC) for discounted MDP, which can take advantage of learned nuisance functions to reduce estimation errors. Moreover, DR-Off-PAC adopts a single timescale structure, in which both actor and critics are updated simultaneously with constant stepsize, and is thus more sample efficient than prior algorithms that adopt either two timescale or nested-loop structure. We study the finite-time convergence rate and characterize the sample complexity for DR-Off-PAC to attain an $\\epsilon$-accurate optimal policy. We also show that the overall convergence of DR-Off-PAC is doubly robust to the approximation errors that depend only on the expressive power of approximation functions. 
To the best of our knowledge, our study establishes the first overall sample complexity analysis for single time-scale off-policy AC algorithm.}\n}", "pdf": "http://proceedings.mlr.press/v139/xu21j/xu21j.pdf", "supp": "", "pdf_size": 472735, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12212363985860185714&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Electrical and Computer Engineering, The Ohio State University; Departments of Industrial Engineering & Management Sciences, Northwestern University; Department of Operations Research and Financial Engineering, Princeton University; Department of Electrical and Computer Engineering, The Ohio State University", "aff_domain": "osu.edu; ; ; ", "email": "osu.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/xu21j.html", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "Ohio State University;Northwestern University;Princeton University", "aff_unique_dep": "Department of Electrical and Computer Engineering;Departments of Industrial Engineering & Management Sciences;Department of Operations Research and Financial Engineering", "aff_unique_url": "https://www.osu.edu;https://www.northwestern.edu;https://www.princeton.edu", "aff_unique_abbr": "OSU;NU;Princeton", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "DriftSurf: Stable-State / Reactive-State Learning under Concept Drift", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9929", "id": "9929", "proceeding": "http://proceedings.mlr.press/v139/tahmasbi21a.html", "slides": "/media/icml-2021/Slides/9929.pdf", "author_site": "Ashraf Tahmasbi, Ellango Jothimurugesan, Srikanta Tirthapura, Phillip Gibbons", "author": "Ashraf Tahmasbi; Ellango Jothimurugesan; Srikanta Tirthapura; Phillip B Gibbons", "abstract": "When learning from streaming data, a change in the data distribution, also known as concept drift, can render a previously-learned model inaccurate and require training a new model. We present an adaptive learning algorithm that extends previous drift-detection-based methods by incorporating drift detection into a broader stable-state/reactive-state process. The advantage of our approach is that we can use aggressive drift detection in the stable state to achieve a high detection rate, but mitigate the false positive rate of standalone drift detection via a reactive state that reacts quickly to true drifts while eliminating most false positives. The algorithm is generic in its base learner and can be applied across a variety of supervised learning problems. Our theoretical analysis shows that the risk of the algorithm is (i) statistically better than standalone drift detection and (ii) competitive to an algorithm with oracle knowledge of when (abrupt) drifts occur. 
Experiments on synthetic and real datasets with concept drifts confirm our theoretical analysis.", "bibtex": "@InProceedings{pmlr-v139-tahmasbi21a,\n title = \t {DriftSurf: Stable-State / Reactive-State Learning under Concept Drift},\n author = {Tahmasbi, Ashraf and Jothimurugesan, Ellango and Tirthapura, Srikanta and Gibbons, Phillip B},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10054--10064},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tahmasbi21a/tahmasbi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/tahmasbi21a.html},\n abstract = \t {When learning from streaming data, a change in the data distribution, also known as concept drift, can render a previously-learned model inaccurate and require training a new model. We present an adaptive learning algorithm that extends previous drift-detection-based methods by incorporating drift detection into a broader stable-state/reactive-state process. The advantage of our approach is that we can use aggressive drift detection in the stable state to achieve a high detection rate, but mitigate the false positive rate of standalone drift detection via a reactive state that reacts quickly to true drifts while eliminating most false positives. The algorithm is generic in its base learner and can be applied across a variety of supervised learning problems. Our theoretical analysis shows that the risk of the algorithm is (i) statistically better than standalone drift detection and (ii) competitive to an algorithm with oracle knowledge of when (abrupt) drifts occur. 
Experiments on synthetic and real datasets with concept drifts confirm our theoretical analysis.}\n}", "pdf": "http://proceedings.mlr.press/v139/tahmasbi21a/tahmasbi21a.pdf", "supp": "", "pdf_size": 2077148, "gs_citation": 37, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3146303270286957851&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Department of Electrical and Computer Engineering, Iowa State University, Ames, Iowa, USA + Apple Inc, Cupertino, California, USA; Computer Science Department, Carnegie Mellon University, Pittsburgh, Pennsylvania, USA; Department of Electrical and Computer Engineering, Iowa State University, Ames, Iowa, USA + Apple Inc, Cupertino, California, USA; Computer Science Department, Carnegie Mellon University, Pittsburgh, Pennsylvania, USA", "aff_domain": "iastate.edu;cs.cmu.edu; ; ", "email": "iastate.edu;cs.cmu.edu; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/tahmasbi21a.html", "aff_unique_index": "0+1;2;0+1;2", "aff_unique_norm": "Iowa State University;Apple;Carnegie Mellon University", "aff_unique_dep": "Department of Electrical and Computer Engineering;Apple Inc;Computer Science Department", "aff_unique_url": "https://www.iastate.edu;https://www.apple.com;https://www.cmu.edu", "aff_unique_abbr": "ISU;Apple;CMU", "aff_campus_unique_index": "0+1;2;0+1;2", "aff_campus_unique": "Ames;Cupertino;Pittsburgh", "aff_country_unique_index": "0+0;0;0+0;0", "aff_country_unique": "United States" }, { "title": "Dropout: Explicit Forms and Capacity Control", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10381", "id": "10381", "proceeding": "http://proceedings.mlr.press/v139/arora21a.html", "slides": "/media/icml-2021/Slides/10381.pdf", "author_site": "Raman Arora, Peter Bartlett, Poorya Mianjy, Nati Srebro", "author": "Raman Arora; Peter Bartlett; Poorya Mianjy; Nathan Srebro", "abstract": "We investigate the capacity control provided by dropout in various machine learning problems. First, we study dropout for matrix completion, where it induces a distribution-dependent regularizer that equals the weighted trace-norm of the product of the factors. In deep learning, we show that the distribution-dependent regularizer due to dropout directly controls the Rademacher complexity of the underlying class of deep neural networks. These developments enable us to give concrete generalization error bounds for the dropout algorithm in both matrix completion as well as training deep neural networks.", "bibtex": "@InProceedings{pmlr-v139-arora21a,\n title = \t {Dropout: Explicit Forms and Capacity Control},\n author = {Arora, Raman and Bartlett, Peter and Mianjy, Poorya and Srebro, Nathan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {351--361},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/arora21a/arora21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/arora21a.html},\n abstract = \t {We investigate the capacity control provided by dropout in various machine learning problems. First, we study dropout for matrix completion, where it induces a distribution-dependent regularizer that equals the weighted trace-norm of the product of the factors. 
In deep learning, we show that the distribution-dependent regularizer due to dropout directly controls the Rademacher complexity of the underlying class of deep neural networks. These developments enable us to give concrete generalization error bounds for the dropout algorithm in both matrix completion as well as training deep neural networks.}\n}", "pdf": "http://proceedings.mlr.press/v139/arora21a/arora21a.pdf", "supp": "", "pdf_size": 790046, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7582175856338441846&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Johns Hopkins University; University of California, Berkeley; Johns Hopkins University; TTI Chicago", "aff_domain": "cs.jhu.edu; ; ; ", "email": "cs.jhu.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/arora21a.html", "aff_unique_index": "0;1;0;2", "aff_unique_norm": "Johns Hopkins University;University of California, Berkeley;Toyota Technological Institute at Chicago", "aff_unique_dep": ";;", "aff_unique_url": "https://www.jhu.edu;https://www.berkeley.edu;https://www.tti-chicago.org", "aff_unique_abbr": "JHU;UC Berkeley;TTI", "aff_campus_unique_index": "1;2", "aff_campus_unique": ";Berkeley;Chicago", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Dual Principal Component Pursuit for Robust Subspace Learning: Theory and Algorithms for a Holistic Approach", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10547", "id": "10547", "proceeding": "http://proceedings.mlr.press/v139/ding21b.html", "slides": "", "author_site": "Tianyu Ding, Zhihui Zhu, Rene Vidal, Daniel Robinson", "author": "Tianyu Ding; Zhihui Zhu; Rene Vidal; Daniel P Robinson", "abstract": "The Dual Principal Component Pursuit (DPCP) method has been proposed to robustly recover a subspace of high-relative dimension from corrupted data. Existing analyses and algorithms of DPCP, however, mainly focus on finding a normal to a single hyperplane that contains the inliers. Although these algorithms can be extended to a subspace of higher co-dimension through a recursive approach that sequentially finds a new basis element of the space orthogonal to the subspace, this procedure is computationally expensive and lacks convergence guarantees. In this paper, we consider a DPCP approach for simultaneously computing the entire basis of the orthogonal complement subspace (we call this a holistic approach) by solving a non-convex non-smooth optimization problem over the Grassmannian. We provide geometric and statistical analyses for the global optimality and prove that it can tolerate as many outliers as the square of the number of inliers, under both noiseless and noisy settings. 
We then present a Riemannian regularity condition for the problem, which is then used to prove that a Riemannian subgradient method converges linearly to a neighborhood of the orthogonal subspace with error proportional to the noise level.", "bibtex": "@InProceedings{pmlr-v139-ding21b,\n title = \t {Dual Principal Component Pursuit for Robust Subspace Learning: Theory and Algorithms for a Holistic Approach},\n author = {Ding, Tianyu and Zhu, Zhihui and Vidal, Rene and Robinson, Daniel P},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2739--2748},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ding21b/ding21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/ding21b.html},\n abstract = \t {The Dual Principal Component Pursuit (DPCP) method has been proposed to robustly recover a subspace of high-relative dimension from corrupted data. Existing analyses and algorithms of DPCP, however, mainly focus on finding a normal to a single hyperplane that contains the inliers. Although these algorithms can be extended to a subspace of higher co-dimension through a recursive approach that sequentially finds a new basis element of the space orthogonal to the subspace, this procedure is computationally expensive and lacks convergence guarantees. In this paper, we consider a DPCP approach for simultaneously computing the entire basis of the orthogonal complement subspace (we call this a holistic approach) by solving a non-convex non-smooth optimization problem over the Grassmannian. We provide geometric and statistical analyses for the global optimality and prove that it can tolerate as many outliers as the square of the number of inliers, under both noiseless and noisy settings. 
We then present a Riemannian regularity condition for the problem, which is then used to prove that a Riemannian subgradient method converges linearly to a neighborhood of the orthogonal subspace with error proportional to the noise level.}\n}", "pdf": "http://proceedings.mlr.press/v139/ding21b/ding21b.pdf", "supp": "", "pdf_size": 1130182, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10541376465905675052&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of Applied Mathematics & Statistics, Johns Hopkins University, USA; School of Electrical and Computer Engineering, University of Denver, USA; Mathematical Institute for Data Science, Johns Hopkins University, USA; Department of Industrial and Systems Engineering, Lehigh University, USA", "aff_domain": "jhu.edu;du.edu; ; ", "email": "jhu.edu;du.edu; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/ding21b.html", "aff_unique_index": "0;1;0;2", "aff_unique_norm": "Johns Hopkins University;University of Denver;Lehigh University", "aff_unique_dep": "Department of Applied Mathematics & Statistics;School of Electrical and Computer Engineering;Department of Industrial and Systems Engineering", "aff_unique_url": "https://www.jhu.edu;https://www.du.edu;https://www.lehigh.edu", "aff_unique_abbr": "JHU;DU;Lehigh", "aff_campus_unique_index": "1", "aff_campus_unique": ";Denver", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Dueling Convex Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10441", "id": "10441", "proceeding": "http://proceedings.mlr.press/v139/saha21b.html", "slides": "/media/icml-2021/Slides/10441_NjI1bpw.pdf", "author_site": "Aadirupa Saha, Tomer Koren, Yishay Mansour", "author": "Aadirupa Saha; Tomer Koren; Yishay Mansour", "abstract": "We address the problem of convex optimization with preference (dueling) feedback. Like the traditional optimization objective, the goal is to find the optimal point with the least possible query complexity, however, without the luxury of even a zeroth order feedback. Instead, the learner can only observe a single noisy bit which is win-loss feedback for a pair of queried points based on their function values. % The problem is certainly of great practical relevance as in many real-world scenarios, such as recommender systems or learning from customer preferences, where the system feedback is often restricted to just one binary-bit preference information. % We consider the problem of online convex optimization (OCO) solely by actively querying $\\{0,1\\}$ noisy-comparison feedback of decision point pairs, with the objective of finding a near-optimal point (function minimizer) with the least possible number of queries. %a very general class of monotonic, non-decreasing transfer functions, and analyze the problem for any $d$-dimensional smooth convex function. % For the non-stationary OCO setup, where the underlying convex function may change over time, we prove an impossibility result towards achieving the above objective. We next focus only on the stationary OCO problem, and our main contribution lies in designing a normalized gradient descent based algorithm towards finding a $\\epsilon$-best optimal point. Towards this, our algorithm is shown to yield a convergence rate of $\\tilde O(\\nicefrac{d\\beta}{\\epsilon \\nu^2})$ ($\\nu$ being the noise parameter) when the underlying function is $\\beta$-smooth. 
Further we show an improved convergence rate of just $\\tilde O(\\nicefrac{d\\beta}{\\alpha \\nu^2} \\log \\frac{1}{\\epsilon})$ when the function is additionally also $\\alpha$-strongly convex.", "bibtex": "@InProceedings{pmlr-v139-saha21b,\n title = \t {Dueling Convex Optimization},\n author = {Saha, Aadirupa and Koren, Tomer and Mansour, Yishay},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9245--9254},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/saha21b/saha21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/saha21b.html},\n abstract = \t {We address the problem of convex optimization with preference (dueling) feedback. Like the traditional optimization objective, the goal is to find the optimal point with the least possible query complexity, however, without the luxury of even a zeroth order feedback. Instead, the learner can only observe a single noisy bit which is win-loss feedback for a pair of queried points based on their function values. % The problem is certainly of great practical relevance as in many real-world scenarios, such as recommender systems or learning from customer preferences, where the system feedback is often restricted to just one binary-bit preference information. % We consider the problem of online convex optimization (OCO) solely by actively querying $\\{0,1\\}$ noisy-comparison feedback of decision point pairs, with the objective of finding a near-optimal point (function minimizer) with the least possible number of queries. %a very general class of monotonic, non-decreasing transfer functions, and analyze the problem for any $d$-dimensional smooth convex function. % For the non-stationary OCO setup, where the underlying convex function may change over time, we prove an impossibility result towards achieving the above objective. We next focus only on the stationary OCO problem, and our main contribution lies in designing a normalized gradient descent based algorithm towards finding a $\\epsilon$-best optimal point. Towards this, our algorithm is shown to yield a convergence rate of $\\tilde O(\\nicefrac{d\\beta}{\\epsilon \\nu^2})$ ($\\nu$ being the noise parameter) when the underlying function is $\\beta$-smooth. 
Further we show an improved convergence rate of just $\\tilde O(\\nicefrac{d\\beta}{\\alpha \\nu^2} \\log \\frac{1}{\\epsilon})$ when the function is additionally also $\\alpha$-strongly convex.}\n}", "pdf": "http://proceedings.mlr.press/v139/saha21b/saha21b.pdf", "supp": "", "pdf_size": 364024, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11648495367772567155&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Microsoft Research, New York City; Blavatnik School of Computer Science, Tel Aviv University + Google Research Tel Aviv; Blavatnik School of Computer Science, Tel Aviv University + Google Research Tel Aviv", "aff_domain": "microsoft.com; ; ", "email": "microsoft.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/saha21b.html", "aff_unique_index": "0;1+2;1+2", "aff_unique_norm": "Microsoft;Tel Aviv University;Google", "aff_unique_dep": "Microsoft Research;Blavatnik School of Computer Science;Google Research", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://www.tau.ac.il;https://research.google", "aff_unique_abbr": "MSR;TAU;Google", "aff_campus_unique_index": "0;1+1;1+1", "aff_campus_unique": "New York City;Tel Aviv", "aff_country_unique_index": "0;1+1;1+1", "aff_country_unique": "United States;Israel" }, { "title": "Dynamic Balancing for Model Selection in Bandits and RL", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9907", "id": "9907", "proceeding": "http://proceedings.mlr.press/v139/cutkosky21a.html", "slides": "", "author_site": "Ashok Cutkosky, Christoph Dann, Abhimanyu Das, Claudio Gentile, Aldo Pacchiano, Manish Purohit", "author": "Ashok Cutkosky; Christoph Dann; Abhimanyu Das; Claudio Gentile; Aldo Pacchiano; Manish Purohit", "abstract": "We propose a framework for model selection by combining base algorithms in stochastic bandits and reinforcement learning. We require a candidate regret bound for each base algorithm that may or may not hold. We select base algorithms to play in each round using a \u201cbalancing condition\u201d on the candidate regret bounds. Our approach simultaneously recovers previous worst-case regret bounds, while also obtaining much smaller regret in natural scenarios when some base learners significantly exceed their candidate bounds. Our framework is relevant in many settings, including linear bandits and MDPs with nested function classes, linear bandits with unknown misspecification, and tuning confidence parameters of algorithms such as LinUCB. 
Moreover, unlike recent efforts in model selection for linear stochastic bandits, our approach can be extended to consider adversarial rather than stochastic contexts.", "bibtex": "@InProceedings{pmlr-v139-cutkosky21a,\n title = \t {Dynamic Balancing for Model Selection in Bandits and RL},\n author = {Cutkosky, Ashok and Dann, Christoph and Das, Abhimanyu and Gentile, Claudio and Pacchiano, Aldo and Purohit, Manish},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2276--2285},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cutkosky21a/cutkosky21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/cutkosky21a.html},\n abstract = \t {We propose a framework for model selection by combining base algorithms in stochastic bandits and reinforcement learning. We require a candidate regret bound for each base algorithm that may or may not hold. We select base algorithms to play in each round using a \u201cbalancing condition\u201d on the candidate regret bounds. Our approach simultaneously recovers previous worst-case regret bounds, while also obtaining much smaller regret in natural scenarios when some base learners significantly exceed their candidate bounds. Our framework is relevant in many settings, including linear bandits and MDPs with nested function classes, linear bandits with unknown misspecification, and tuning confidence parameters of algorithms such as LinUCB. Moreover, unlike recent efforts in model selection for linear stochastic bandits, our approach can be extended to consider adversarial rather than stochastic contexts.}\n}", "pdf": "http://proceedings.mlr.press/v139/cutkosky21a/cutkosky21a.pdf", "supp": "", "pdf_size": 1123779, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13840692009457749694&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "Boston University; Google Research; Google Research; Google Research; University of California, Berkeley; Google Research", "aff_domain": "cutkosky.com;cdann.net; ; ; ; ", "email": "cutkosky.com;cdann.net; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/cutkosky21a.html", "aff_unique_index": "0;1;1;1;2;1", "aff_unique_norm": "Boston University;Google;University of California, Berkeley", "aff_unique_dep": ";Google Research;", "aff_unique_url": "https://www.bu.edu;https://research.google;https://www.berkeley.edu", "aff_unique_abbr": "BU;Google Research;UC Berkeley", "aff_campus_unique_index": "1;1;1;2;1", "aff_campus_unique": ";Mountain View;Berkeley", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Dynamic Game Theoretic Neural Optimizer", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10261", "id": "10261", "proceeding": "http://proceedings.mlr.press/v139/liu21d.html", "slides": "/media/icml-2021/Slides/10261.pdf", "author_site": "Guan-Horng Liu, Tianrong Chen, Evangelos Theodorou", "author": "Guan-Horng Liu; Tianrong Chen; Evangelos Theodorou", "abstract": "The connection between training deep neural networks (DNNs) and optimal control theory (OCT) has attracted considerable attention as a principled tool of algorithmic design. 
Despite few attempts being made, they have been limited to architectures where the layer propagation resembles a Markovian dynamical system. This casts doubts on their flexibility to modern networks that heavily rely on non-Markovian dependencies between layers (e.g. skip connections in residual networks). In this work, we propose a novel dynamic game perspective by viewing each layer as a player in a dynamic game characterized by the DNN itself. Through this lens, different classes of optimizers can be seen as matching different types of Nash equilibria, depending on the implicit information structure of each (p)layer. The resulting method, called Dynamic Game Theoretic Neural Optimizer (DGNOpt), not only generalizes OCT-inspired optimizers to richer network class; it also motivates a new training principle by solving a multi-player cooperative game. DGNOpt shows convergence improvements over existing methods on image classification datasets with residual and inception networks. Our work marries strengths from both OCT and game theory, paving ways to new algorithmic opportunities from robust optimal control and bandit-based optimization.", "bibtex": "@InProceedings{pmlr-v139-liu21d,\n title = \t {Dynamic Game Theoretic Neural Optimizer},\n author = {Liu, Guan-Horng and Chen, Tianrong and Theodorou, Evangelos},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6759--6769},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21d/liu21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21d.html},\n abstract = \t {The connection between training deep neural networks (DNNs) and optimal control theory (OCT) has attracted considerable attention as a principled tool of algorithmic design. Despite few attempts being made, they have been limited to architectures where the layer propagation resembles a Markovian dynamical system. This casts doubts on their flexibility to modern networks that heavily rely on non-Markovian dependencies between layers (e.g. skip connections in residual networks). In this work, we propose a novel dynamic game perspective by viewing each layer as a player in a dynamic game characterized by the DNN itself. Through this lens, different classes of optimizers can be seen as matching different types of Nash equilibria, depending on the implicit information structure of each (p)layer. The resulting method, called Dynamic Game Theoretic Neural Optimizer (DGNOpt), not only generalizes OCT-inspired optimizers to richer network class; it also motivates a new training principle by solving a multi-player cooperative game. DGNOpt shows convergence improvements over existing methods on image classification datasets with residual and inception networks. 
Our work marries strengths from both OCT and game theory, paving ways to new algorithmic opportunities from robust optimal control and bandit-based optimization.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21d/liu21d.pdf", "supp": "", "pdf_size": 5424520, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14200131811064640499&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Center for Machine Learning+School of Aerospace Engineering; School of Electrical and Computer Engineering; Center for Machine Learning+School of Aerospace Engineering", "aff_domain": "gatech.edu; ;gatech.edu", "email": "gatech.edu; ;gatech.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/liu21d.html", "aff_unique_index": "0+1;2;0+1", "aff_unique_norm": "Center for Machine Learning;School of Aerospace Engineering;Institution not specified", "aff_unique_dep": "Machine Learning;Aerospace Engineering;Electrical and Computer Engineering", "aff_unique_url": ";;", "aff_unique_abbr": ";;", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": ";", "aff_country_unique": "" }, { "title": "Dynamic Planning and Learning under Recovering Rewards", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10681", "id": "10681", "proceeding": "http://proceedings.mlr.press/v139/simchi-levi21a.html", "slides": "", "author_site": "David Simchi-Levi, Zeyu Zheng, Feng Zhu", "author": "David Simchi-Levi; Zeyu Zheng; Feng Zhu", "abstract": "Motivated by emerging applications such as live-streaming e-commerce, promotions and recommendations, we introduce a general class of multi-armed bandit problems that have the following two features: (i) the decision maker can pull and collect rewards from at most $K$ out of $N$ different arms in each time period; (ii) the expected reward of an arm immediately drops after it is pulled, and then non-parametrically recovers as the idle time increases. With the objective of maximizing expected cumulative rewards over $T$ time periods, we propose, construct and prove performance guarantees for a class of \u201cPurely Periodic Policies\u201d. For the offline problem when all model parameters are known, our proposed policy obtains an approximation ratio that is at the order of $1-\\mathcal O(1/\\sqrt{K})$, which is asymptotically optimal when $K$ grows to infinity. For the online problem when the model parameters are unknown and need to be learned, we design an Upper Confidence Bound (UCB) based policy that approximately has $\\widetilde{\\mathcal O}(N\\sqrt{T})$ regret against the offline benchmark. 
Our framework and policy design may have the potential to be adapted into other offline planning and online learning applications with non-stationary and recovering rewards.", "bibtex": "@InProceedings{pmlr-v139-simchi-levi21a,\n title = \t {Dynamic Planning and Learning under Recovering Rewards},\n author = {Simchi-Levi, David and Zheng, Zeyu and Zhu, Feng},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9702--9711},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/simchi-levi21a/simchi-levi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/simchi-levi21a.html},\n abstract = \t {Motivated by emerging applications such as live-streaming e-commerce, promotions and recommendations, we introduce a general class of multi-armed bandit problems that have the following two features: (i) the decision maker can pull and collect rewards from at most $K$ out of $N$ different arms in each time period; (ii) the expected reward of an arm immediately drops after it is pulled, and then non-parametrically recovers as the idle time increases. With the objective of maximizing expected cumulative rewards over $T$ time periods, we propose, construct and prove performance guarantees for a class of \u201cPurely Periodic Policies\u201d. For the offline problem when all model parameters are known, our proposed policy obtains an approximation ratio that is at the order of $1-\\mathcal O(1/\\sqrt{K})$, which is asymptotically optimal when $K$ grows to infinity. For the online problem when the model parameters are unknown and need to be learned, we design an Upper Confidence Bound (UCB) based policy that approximately has $\\widetilde{\\mathcal O}(N\\sqrt{T})$ regret against the offline benchmark. 
Our framework and policy design may have the potential to be adapted into other offline planning and online learning applications with non-stationary and recovering rewards.}\n}", "pdf": "http://proceedings.mlr.press/v139/simchi-levi21a/simchi-levi21a.pdf", "supp": "", "pdf_size": 276947, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=991896269358764838&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Institute for Data, Systems, and Society, Massachusetts Institute of Technology, Massachusetts, USA+Department of Industrial Engineering and Operations Research, University of California, Berkeley, USA; Department of Industrial Engineering and Operations Research, University of California, Berkeley, USA; Institute for Data, Systems, and Society, Massachusetts Institute of Technology, Massachusetts, USA", "aff_domain": "mit.edu;berkeley.edu;mit.edu", "email": "mit.edu;berkeley.edu;mit.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/simchi-levi21a.html", "aff_unique_index": "0+1;1;0", "aff_unique_norm": "Massachusetts Institute of Technology;University of California, Berkeley", "aff_unique_dep": "Institute for Data, Systems, and Society;Department of Industrial Engineering and Operations Research", "aff_unique_url": "https://web.mit.edu;https://www.berkeley.edu", "aff_unique_abbr": "MIT;UC Berkeley", "aff_campus_unique_index": "0+1;1;0", "aff_campus_unique": "Massachusetts;Berkeley", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "United States" }, { "title": "E(n) Equivariant Graph Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9279", "id": "9279", "proceeding": "http://proceedings.mlr.press/v139/satorras21a.html", "slides": "", "author_site": "Victor Garcia Satorras, Emiel Hoogeboom, Max Welling", "author": "V\u0131\u0301ctor Garcia Satorras; Emiel Hoogeboom; Max Welling", "abstract": "This paper introduces a new model to learn graph neural networks equivariant to rotations, translations, reflections and permutations called E(n)-Equivariant Graph Neural Networks (EGNNs). In contrast with existing methods, our work does not require computationally expensive higher-order representations in intermediate layers while it still achieves competitive or better performance. In addition, whereas existing methods are limited to equivariance on 3 dimensional spaces, our model is easily scaled to higher-dimensional spaces. We demonstrate the effectiveness of our method on dynamical systems modelling, representation learning in graph autoencoders and predicting molecular properties.", "bibtex": "@InProceedings{pmlr-v139-satorras21a,\n title = \t {E(n) Equivariant Graph Neural Networks},\n author = {Satorras, V\\'{\\i}ctor Garcia and Hoogeboom, Emiel and Welling, Max},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9323--9332},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/satorras21a/satorras21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/satorras21a.html},\n abstract = \t {This paper introduces a new model to learn graph neural networks equivariant to rotations, translations, reflections and permutations called E(n)-Equivariant Graph Neural Networks (EGNNs). 
In contrast with existing methods, our work does not require computationally expensive higher-order representations in intermediate layers while it still achieves competitive or better performance. In addition, whereas existing methods are limited to equivariance on 3 dimensional spaces, our model is easily scaled to higher-dimensional spaces. We demonstrate the effectiveness of our method on dynamical systems modelling, representation learning in graph autoencoders and predicting molecular properties.}\n}", "pdf": "http://proceedings.mlr.press/v139/satorras21a/satorras21a.pdf", "supp": "", "pdf_size": 963444, "gs_citation": 1237, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7354013938007740374&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "UvA-Bosch Delta Lab, University of Amsterdam, Netherlands; UvA-Bosch Delta Lab, University of Amsterdam, Netherlands; UvA-Bosch Delta Lab, University of Amsterdam, Netherlands", "aff_domain": "uva.nl;uva.nl;uva.nl", "email": "uva.nl;uva.nl;uva.nl", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/satorras21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Amsterdam", "aff_unique_dep": "UvA-Bosch Delta Lab", "aff_unique_url": "https://www.uva.nl", "aff_unique_abbr": "UvA", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Netherlands" }, { "title": "EL-Attention: Memory Efficient Lossless Attention for Generation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9703", "id": "9703", "proceeding": "http://proceedings.mlr.press/v139/yan21a.html", "slides": "", "author_site": "Yu Yan, Jiusheng Chen, Weizhen Qi, Nikhil Bhendawade, Yeyun Gong, Nan Duan, Ruofei Zhang", "author": "Yu Yan; Jiusheng Chen; Weizhen Qi; Nikhil Bhendawade; Yeyun Gong; Nan Duan; Ruofei Zhang", "abstract": "Transformer model with multi-head attention requires caching intermediate results for efficient inference in generation tasks. However, cache brings new memory-related costs and prevents leveraging larger batch size for faster speed. We propose memory-efficient lossless attention (called EL-attention) to address this issue. It avoids heavy operations for building multi-head keys and values, cache for them is not needed. EL-attention constructs an ensemble of attention results by expanding query while keeping key and value shared. It produces the same result as multi-head attention with less GPU memory and faster inference speed. We conduct extensive experiments on Transformer, BART, and GPT-2 for summarization and question generation tasks. 
The results show EL-attention speeds up existing models by 1.6x to 5.3x without accuracy loss.", "bibtex": "@InProceedings{pmlr-v139-yan21a,\n title = \t {EL-Attention: Memory Efficient Lossless Attention for Generation},\n author = {Yan, Yu and Chen, Jiusheng and Qi, Weizhen and Bhendawade, Nikhil and Gong, Yeyun and Duan, Nan and Zhang, Ruofei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11648--11658},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yan21a/yan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yan21a.html},\n abstract = \t {Transformer model with multi-head attention requires caching intermediate results for efficient inference in generation tasks. However, cache brings new memory-related costs and prevents leveraging larger batch size for faster speed. We propose memory-efficient lossless attention (called EL-attention) to address this issue. It avoids heavy operations for building multi-head keys and values, cache for them is not needed. EL-attention constructs an ensemble of attention results by expanding query while keeping key and value shared. It produces the same result as multi-head attention with less GPU memory and faster inference speed. We conduct extensive experiments on Transformer, BART, and GPT-2 for summarization and question generation tasks. The results show EL-attention speeds up existing models by 1.6x to 5.3x without accuracy loss.}\n}", "pdf": "http://proceedings.mlr.press/v139/yan21a/yan21a.pdf", "supp": "", "pdf_size": 594118, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1375858256863771464&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Microsoft, Redmond, WA, USA; University of Science and Technology of China; Microsoft, Redmond, WA, USA; Microsoft, Redmond, WA, USA; Microsoft Research Asia; Microsoft Research Asia; Microsoft, Sunnyvale, CA, USA", "aff_domain": "microsoft.com; ; ; ; ; ; ", "email": "microsoft.com; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/yan21a.html", "aff_unique_index": "0;1;0;0;0;0;0", "aff_unique_norm": "Microsoft;University of Science and Technology of China", "aff_unique_dep": "Microsoft Corporation;", "aff_unique_url": "https://www.microsoft.com;http://www.ustc.edu.cn", "aff_unique_abbr": "Microsoft;USTC", "aff_campus_unique_index": "0;0;0;2;2;3", "aff_campus_unique": "Redmond;;Asia;Sunnyvale", "aff_country_unique_index": "0;1;0;0;1;1;0", "aff_country_unique": "United States;China" }, { "title": "EMaQ: Expected-Max Q-Learning Operator for Simple Yet Effective Offline and Online RL", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9553", "id": "9553", "proceeding": "http://proceedings.mlr.press/v139/ghasemipour21a.html", "slides": "", "author_site": "Seyed Kamyar Seyed Ghasemipour, Dale Schuurmans, Shixiang Gu", "author": "Seyed Kamyar Seyed Ghasemipour; Dale Schuurmans; Shixiang Shane Gu", "abstract": "Off-policy reinforcement learning (RL) holds the promise of sample-efficient learning of decision-making policies by leveraging past experience. 
However, in the offline RL setting \u2013 where a fixed collection of interactions are provided and no further interactions are allowed \u2013 it has been shown that standard off-policy RL methods can significantly underperform. In this work, we closely investigate an important simplification of BCQ (Fujimoto et al., 2018) \u2013 a prior approach for offline RL \u2013 removing a heuristic design choice. Importantly, in contrast to their original theoretical considerations, we derive this simplified algorithm through the introduction of a novel backup operator, Expected-Max Q-Learning (EMaQ), which is more closely related to the resulting practical algorithm. Specifically, in addition to the distribution support, EMaQ explicitly considers the number of samples and the proposal distribution, allowing us to derive new sub-optimality bounds. In the offline RL setting \u2013 the main focus of this work \u2013 EMaQ matches and outperforms prior state-of-the-art in the D4RL benchmarks (Fu et al., 2020). In the online RL setting, we demonstrate that EMaQ is competitive with Soft Actor Critic (SAC). The key contributions of our empirical findings are demonstrating the importance of careful generative model design for estimating behavior policies, and an intuitive notion of complexity for offline RL problems. With its simple interpretation and fewer moving parts, such as no explicit function approximator representing the policy, EMaQ serves as a strong yet easy to implement baseline for future work.", "bibtex": "@InProceedings{pmlr-v139-ghasemipour21a,\n title = \t {EMaQ: Expected-Max Q-Learning Operator for Simple Yet Effective Offline and Online RL},\n author = {Ghasemipour, Seyed Kamyar Seyed and Schuurmans, Dale and Gu, Shixiang Shane},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3682--3691},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ghasemipour21a/ghasemipour21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ghasemipour21a.html},\n abstract = \t {Off-policy reinforcement learning (RL) holds the promise of sample-efficient learning of decision-making policies by leveraging past experience. However, in the offline RL setting \u2013 where a fixed collection of interactions are provided and no further interactions are allowed \u2013 it has been shown that standard off-policy RL methods can significantly underperform. In this work, we closely investigate an important simplification of BCQ (Fujimoto et al., 2018) \u2013 a prior approach for offline RL \u2013 removing a heuristic design choice. Importantly, in contrast to their original theoretical considerations, we derive this simplified algorithm through the introduction of a novel backup operator, Expected-Max Q-Learning (EMaQ), which is more closely related to the resulting practical algorithm. Specifically, in addition to the distribution support, EMaQ explicitly considers the number of samples and the proposal distribution, allowing us to derive new sub-optimality bounds. In the offline RL setting \u2013 the main focus of this work \u2013 EMaQ matches and outperforms prior state-of-the-art in the D4RL benchmarks (Fu et al., 2020). In the online RL setting, we demonstrate that EMaQ is competitive with Soft Actor Critic (SAC). 
The key contributions of our empirical findings are demonstrating the importance of careful generative model design for estimating behavior policies, and an intuitive notion of complexity for offline RL problems. With its simple interpretation and fewer moving parts, such as no explicit function approximator representing the policy, EMaQ serves as a strong yet easy to implement baseline for future work.}\n}", "pdf": "http://proceedings.mlr.press/v139/ghasemipour21a/ghasemipour21a.pdf", "supp": "", "pdf_size": 743184, "gs_citation": 144, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5481875536186180603&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, University of Toronto, Toronto, Canada + Vector Institute, Toronto, Canada + Google Research, Mountain View, CA, USA; Google Research, Mountain View, CA, USA; Google Research, Mountain View, CA, USA", "aff_domain": "cs.toronto.edu; ; ", "email": "cs.toronto.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/ghasemipour21a.html", "aff_unique_index": "0+1+2;2;2", "aff_unique_norm": "University of Toronto;Vector Institute;Google", "aff_unique_dep": "Department of Computer Science;;Google Research", "aff_unique_url": "https://www.utoronto.ca;https://vectorinstitute.ai;https://research.google", "aff_unique_abbr": "U of T;Vector Institute;Google", "aff_campus_unique_index": "0+0+1;1;1", "aff_campus_unique": "Toronto;Mountain View", "aff_country_unique_index": "0+0+1;1;1", "aff_country_unique": "Canada;United States" }, { "title": "Efficient Deviation Types and Learning for Hindsight Rationality in Extensive-Form Games", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9549", "id": "9549", "proceeding": "http://proceedings.mlr.press/v139/morrill21a.html", "slides": "/media/icml-2021/Slides/9549.pdf", "author_site": "Dustin Morrill, Ryan D'Orazio, Marc Lanctot, James Wright, Michael Bowling, Amy Greenwald", "author": "Dustin Morrill; Ryan D\u2019Orazio; Marc Lanctot; James R Wright; Michael Bowling; Amy R Greenwald", "abstract": "Hindsight rationality is an approach to playing general-sum games that prescribes no-regret learning dynamics for individual agents with respect to a set of deviations, and further describes jointly rational behavior among multiple agents with mediated equilibria. To develop hindsight rational learning in sequential decision-making settings, we formalize behavioral deviations as a general class of deviations that respect the structure of extensive-form games. Integrating the idea of time selection into counterfactual regret minimization (CFR), we introduce the extensive-form regret minimization (EFR) algorithm that achieves hindsight rationality for any given set of behavioral deviations with computation that scales closely with the complexity of the set. We identify behavioral deviation subsets, the partial sequence deviation types, that subsume previously studied types and lead to efficient EFR instances in games with moderate lengths. 
In addition, we present a thorough empirical analysis of EFR instantiated with different deviation types in benchmark games, where we find that stronger types typically induce better performance.", "bibtex": "@InProceedings{pmlr-v139-morrill21a,\n title = \t {Efficient Deviation Types and Learning for Hindsight Rationality in Extensive-Form Games},\n author = {Morrill, Dustin and D'Orazio, Ryan and Lanctot, Marc and Wright, James R and Bowling, Michael and Greenwald, Amy R},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7818--7828},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/morrill21a/morrill21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/morrill21a.html},\n abstract = \t {Hindsight rationality is an approach to playing general-sum games that prescribes no-regret learning dynamics for individual agents with respect to a set of deviations, and further describes jointly rational behavior among multiple agents with mediated equilibria. To develop hindsight rational learning in sequential decision-making settings, we formalize behavioral deviations as a general class of deviations that respect the structure of extensive-form games. Integrating the idea of time selection into counterfactual regret minimization (CFR), we introduce the extensive-form regret minimization (EFR) algorithm that achieves hindsight rationality for any given set of behavioral deviations with computation that scales closely with the complexity of the set. We identify behavioral deviation subsets, the partial sequence deviation types, that subsume previously studied types and lead to efficient EFR instances in games with moderate lengths. 
In addition, we present a thorough empirical analysis of EFR instantiated with different deviation types in benchmark games, where we find that stronger types typically induce better performance.}\n}", "pdf": "http://proceedings.mlr.press/v139/morrill21a/morrill21a.pdf", "supp": "", "pdf_size": 440066, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2350651197115820142&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Computing Science, University of Alberta + Alberta Machine Intelligence Institute, Edmonton, Alberta, Canada; DIRO, Universite de Montreal, Quebec; Mila, Montreal, Canada; DeepMind; Department of Computing Science, University of Alberta + Alberta Machine Intelligence Institute, Edmonton, Alberta, Canada + DeepMind; Computer Science Department, Brown University, Providence, Rhode Island, United States", "aff_domain": "ualberta.ca; ; ; ; ; ", "email": "ualberta.ca; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/morrill21a.html", "aff_unique_index": "0+1;2;3;4;0+1+4;5", "aff_unique_norm": "University of Alberta;Alberta Machine Intelligence Institute;Universite de Montreal;Mila;DeepMind;Brown University", "aff_unique_dep": "Department of Computing Science;;DIRO;;;Computer Science Department", "aff_unique_url": "https://www.ualberta.ca;https://www.ami.ualberta.ca/;https://www.di.umontreal.ca;https://mila.quebec;https://deepmind.com;https://www.brown.edu", "aff_unique_abbr": "UAlberta;AMII;DIRO;Mila;DeepMind;Brown", "aff_campus_unique_index": "1;2;2;1;3", "aff_campus_unique": ";Edmonton;Montreal;Providence", "aff_country_unique_index": "0+0;0;0;1;0+0+1;2", "aff_country_unique": "Canada;United Kingdom;United States" }, { "title": "Efficient Differentiable Simulation of Articulated Bodies", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9049", "id": "9049", "proceeding": "http://proceedings.mlr.press/v139/qiao21a.html", "slides": "/media/icml-2021/Slides/9049.pdf", "author_site": "Yi-Ling Qiao, Junbang Liang, Vladlen Koltun, Ming Lin", "author": "Yi-Ling Qiao; Junbang Liang; Vladlen Koltun; Ming C Lin", "abstract": "We present a method for efficient differentiable simulation of articulated bodies. This enables integration of articulated body dynamics into deep learning frameworks, and gradient-based optimization of neural networks that operate on articulated bodies. We derive the gradients of the contact solver using spatial algebra and the adjoint method. Our approach is an order of magnitude faster than autodiff tools. By only saving the initial states throughout the simulation process, our method reduces memory requirements by two orders of magnitude. We demonstrate the utility of efficient differentiable dynamics for articulated bodies in a variety of applications. We show that reinforcement learning with articulated systems can be accelerated using gradients provided by our method. 
In applications to control and inverse problems, gradient-based optimization enabled by our work accelerates convergence by more than an order of magnitude.", "bibtex": "@InProceedings{pmlr-v139-qiao21a,\n title = \t {Efficient Differentiable Simulation of Articulated Bodies},\n author = {Qiao, Yi-Ling and Liang, Junbang and Koltun, Vladlen and Lin, Ming C},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8661--8671},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/qiao21a/qiao21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/qiao21a.html},\n abstract = \t {We present a method for efficient differentiable simulation of articulated bodies. This enables integration of articulated body dynamics into deep learning frameworks, and gradient-based optimization of neural networks that operate on articulated bodies. We derive the gradients of the contact solver using spatial algebra and the adjoint method. Our approach is an order of magnitude faster than autodiff tools. By only saving the initial states throughout the simulation process, our method reduces memory requirements by two orders of magnitude. We demonstrate the utility of efficient differentiable dynamics for articulated bodies in a variety of applications. We show that reinforcement learning with articulated systems can be accelerated using gradients provided by our method. In applications to control and inverse problems, gradient-based optimization enabled by our work accelerates convergence by more than an order of magnitude.}\n}", "pdf": "http://proceedings.mlr.press/v139/qiao21a/qiao21a.pdf", "supp": "", "pdf_size": 1443428, "gs_citation": 66, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5132084062857319158&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "University of Maryland, College Park+Intel Labs; University of Maryland, College Park+Intel Labs; Intel Labs; University of Maryland, College Park", "aff_domain": "cs.umd.edu;cs.umd.edu;intel.com;cs.umd.edu", "email": "cs.umd.edu;cs.umd.edu;intel.com;cs.umd.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/qiao21a.html", "aff_unique_index": "0+1;0+1;1;0", "aff_unique_norm": "University of Maryland;Intel", "aff_unique_dep": ";Intel Labs", "aff_unique_url": "https://www/umd.edu;https://www.intel.com", "aff_unique_abbr": "UMD;Intel", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "College Park;", "aff_country_unique_index": "0+0;0+0;0;0", "aff_country_unique": "United States" }, { "title": "Efficient Generative Modelling of Protein Structure Fragments using a Deep Markov Model", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8501", "id": "8501", "proceeding": "http://proceedings.mlr.press/v139/thygesen21a.html", "slides": "", "author_site": "Christian Thygesen, Christian Skj\u00f8dt Steenmans, Ahmad Salim Al-Sibahi, Lys Sanz Moreta, Anders Bundg\u00e5rd S\u00f8rensen, Thomas Hamelryck", "author": "Christian B Thygesen; Christian Skj\u00f8dt Steenmans; Ahmad Salim Al-Sibahi; Lys Sanz Moreta; Anders Bundg\u00e5rd S\u00f8rensen; Thomas Hamelryck", "abstract": "Fragment libraries are often used in protein structure prediction, simulation and design as a means to significantly reduce the vast conformational search 
space. Current state-of-the-art methods for fragment library generation do not properly account for aleatory and epistemic uncertainty, respectively due to the dynamic nature of proteins and experimental errors in protein structures. Additionally, they typically rely on information that is not generally or readily available, such as homologous sequences, related protein structures and other complementary information. To address these issues, we developed BIFROST, a novel take on the fragment library problem based on a Deep Markov Model architecture combined with directional statistics for angular degrees of freedom, implemented in the deep probabilistic programming language Pyro. BIFROST is a probabilistic, generative model of the protein backbone dihedral angles conditioned solely on the amino acid sequence. BIFROST generates fragment libraries with a quality on par with current state-of-the-art methods at a fraction of the run-time, while requiring considerably less information and allowing efficient evaluation of probabilities.", "bibtex": "@InProceedings{pmlr-v139-thygesen21a,\n title = \t {Efficient Generative Modelling of Protein Structure Fragments using a Deep Markov Model},\n author = {Thygesen, Christian B and Steenmans, Christian Skj{\\o}dt and Al-Sibahi, Ahmad Salim and Moreta, Lys Sanz and S{\\o}rensen, Anders Bundg{\\aa}rd and Hamelryck, Thomas},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10258--10267},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/thygesen21a/thygesen21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/thygesen21a.html},\n abstract = \t {Fragment libraries are often used in protein structure prediction, simulation and design as a means to significantly reduce the vast conformational search space. Current state-of-the-art methods for fragment library generation do not properly account for aleatory and epistemic uncertainty, respectively due to the dynamic nature of proteins and experimental errors in protein structures. Additionally, they typically rely on information that is not generally or readily available, such as homologous sequences, related protein structures and other complementary information. To address these issues, we developed BIFROST, a novel take on the fragment library problem based on a Deep Markov Model architecture combined with directional statistics for angular degrees of freedom, implemented in the deep probabilistic programming language Pyro. BIFROST is a probabilistic, generative model of the protein backbone dihedral angles conditioned solely on the amino acid sequence. 
BIFROST generates fragment libraries with a quality on par with current state-of-the-art methods at a fraction of the run-time, while requiring considerably less information and allowing efficient evaluation of probabilities.}\n}", "pdf": "http://proceedings.mlr.press/v139/thygesen21a/thygesen21a.pdf", "supp": "", "pdf_size": 1523507, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12089128721861023275&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": ";;;;;", "aff_domain": ";;;;;", "email": ";;;;;", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/thygesen21a.html" }, { "title": "Efficient Iterative Amortized Inference for Learning Symmetric and Disentangled Multi-Object Representations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9403", "id": "9403", "proceeding": "http://proceedings.mlr.press/v139/emami21a.html", "slides": "/media/icml-2021/Slides/9403.pdf", "author_site": "Patrick Emami, Pan He, Sanjay Ranka, Anand Rangarajan", "author": "Patrick Emami; Pan He; Sanjay Ranka; Anand Rangarajan", "abstract": "Unsupervised multi-object representation learning depends on inductive biases to guide the discovery of object-centric representations that generalize. However, we observe that methods for learning these representations are either impractical due to long training times and large memory consumption or forego key inductive biases. In this work, we introduce EfficientMORL, an efficient framework for the unsupervised learning of object-centric representations. We show that optimization challenges caused by requiring both symmetry and disentanglement can in fact be addressed by high-cost iterative amortized inference by designing the framework to minimize its dependence on it. We take a two-stage approach to inference: first, a hierarchical variational autoencoder extracts symmetric and disentangled representations through bottom-up inference, and second, a lightweight network refines the representations with top-down feedback. The number of refinement steps taken during training is reduced following a curriculum, so that at test time with zero steps the model achieves 99.1% of the refined decomposition performance. We demonstrate strong object decomposition and disentanglement on the standard multi-object benchmark while achieving nearly an order of magnitude faster training and test time inference over the previous state-of-the-art model.", "bibtex": "@InProceedings{pmlr-v139-emami21a,\n title = \t {Efficient Iterative Amortized Inference for Learning Symmetric and Disentangled Multi-Object Representations},\n author = {Emami, Patrick and He, Pan and Ranka, Sanjay and Rangarajan, Anand},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2970--2981},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/emami21a/emami21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/emami21a.html},\n abstract = \t {Unsupervised multi-object representation learning depends on inductive biases to guide the discovery of object-centric representations that generalize. However, we observe that methods for learning these representations are either impractical due to long training times and large memory consumption or forego key inductive biases. 
In this work, we introduce EfficientMORL, an efficient framework for the unsupervised learning of object-centric representations. We show that optimization challenges caused by requiring both symmetry and disentanglement can in fact be addressed by high-cost iterative amortized inference by designing the framework to minimize its dependence on it. We take a two-stage approach to inference: first, a hierarchical variational autoencoder extracts symmetric and disentangled representations through bottom-up inference, and second, a lightweight network refines the representations with top-down feedback. The number of refinement steps taken during training is reduced following a curriculum, so that at test time with zero steps the model achieves 99.1% of the refined decomposition performance. We demonstrate strong object decomposition and disentanglement on the standard multi-object benchmark while achieving nearly an order of magnitude faster training and test time inference over the previous state-of-the-art model.}\n}", "pdf": "http://proceedings.mlr.press/v139/emami21a/emami21a.pdf", "supp": "", "pdf_size": 6770335, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7263217510523036363&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "University of Florida, Gainesville, FL, USA; University of Florida, Gainesville, FL, USA; University of Florida, Gainesville, FL, USA; University of Florida, Gainesville, FL, USA", "aff_domain": "ufl.edu; ; ; ", "email": "ufl.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/emami21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Florida", "aff_unique_dep": "", "aff_unique_url": "https://www.ufl.edu", "aff_unique_abbr": "UF", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Gainesville", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Efficient Lottery Ticket Finding: Less Data is More", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10505", "id": "10505", "proceeding": "http://proceedings.mlr.press/v139/zhang21c.html", "slides": "", "author_site": "Zhenyu Zhang, Xuxi Chen, Tianlong Chen, Zhangyang \u201cAtlas\u201d Wang", "author": "Zhenyu Zhang; Xuxi Chen; Tianlong Chen; Zhangyang Wang", "abstract": "The lottery ticket hypothesis (LTH) reveals the existence of winning tickets (sparse but critical subnetworks) for dense networks, that can be trained in isolation from random initialization to match the latter\u2019s accuracies. However, finding winning tickets requires burdensome computations in the train-prune-retrain process, especially on large-scale datasets (e.g., ImageNet), restricting their practical benefits. This paper explores a new perspective on finding lottery tickets more efficiently, by doing so only with a specially selected subset of data, called Pruning-Aware Critical set (PrAC set), rather than using the full training set. The concept of PrAC set was inspired by the recent observation, that deep networks have samples that are either hard to memorize during training, or easy to forget during pruning. A PrAC set is thus hypothesized to capture those most challenging and informative examples for the dense model. We observe that a high-quality winning ticket can be found with training and pruning the dense network on the very compact PrAC set, which can substantially save training iterations for the ticket finding process. 
Extensive experiments validate our proposal across diverse datasets and network architectures. Specifically, on CIFAR-10, CIFAR-100, and Tiny ImageNet, we locate effective PrAC sets at 35.32%\u201378.19% of their training set sizes. On top of them, we can obtain the same competitive winning tickets for the corresponding dense networks, yet saving up to 82.85%\u201392.77%, 63.54%\u201374.92%, and 76.14%\u201386.56% training iterations, respectively. Crucially, we show that a PrAC set found is reusable across different network architectures, which can amortize the extra cost of finding PrAC sets, yielding a practical regime for efficient lottery ticket finding.", "bibtex": "@InProceedings{pmlr-v139-zhang21c,\n title = \t {Efficient Lottery Ticket Finding: Less Data is More},\n author = {Zhang, Zhenyu and Chen, Xuxi and Chen, Tianlong and Wang, Zhangyang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12380--12390},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21c/zhang21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21c.html},\n abstract = \t {The lottery ticket hypothesis (LTH) reveals the existence of winning tickets (sparse but critical subnetworks) for dense networks, that can be trained in isolation from random initialization to match the latter\u2019s accuracies. However, finding winning tickets requires burdensome computations in the train-prune-retrain process, especially on large-scale datasets (e.g., ImageNet), restricting their practical benefits. This paper explores a new perspective on finding lottery tickets more efficiently, by doing so only with a specially selected subset of data, called Pruning-Aware Critical set (PrAC set), rather than using the full training set. The concept of PrAC set was inspired by the recent observation, that deep networks have samples that are either hard to memorize during training, or easy to forget during pruning. A PrAC set is thus hypothesized to capture those most challenging and informative examples for the dense model. We observe that a high-quality winning ticket can be found with training and pruning the dense network on the very compact PrAC set, which can substantially save training iterations for the ticket finding process. Extensive experiments validate our proposal across diverse datasets and network architectures. Specifically, on CIFAR-10, CIFAR-100, and Tiny ImageNet, we locate effective PrAC sets at 35.32%\u201378.19% of their training set sizes. On top of them, we can obtain the same competitive winning tickets for the corresponding dense networks, yet saving up to 82.85%\u201392.77%, 63.54%\u201374.92%, and 76.14%\u201386.56% training iterations, respectively. 
Crucially, we show that a PrAC set found is reusable across different network architectures, which can amortize the extra cost of finding PrAC sets, yielding a practical regime for efficient lottery ticket finding.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21c/zhang21c.pdf", "supp": "", "pdf_size": 4631330, "gs_citation": 67, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9030177952981756712&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "University of Science and Technology of China+University of Texas at Austin; University of Texas at Austin; University of Texas at Austin; University of Texas at Austin", "aff_domain": "utexas.edu; ; ;utexas.edu", "email": "utexas.edu; ; ;utexas.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/zhang21c.html", "aff_unique_index": "0+1;1;1;1", "aff_unique_norm": "University of Science and Technology of China;University of Texas at Austin", "aff_unique_dep": ";", "aff_unique_url": "http://www.ustc.edu.cn;https://www.utexas.edu", "aff_unique_abbr": "USTC;UT Austin", "aff_campus_unique_index": "1;1;1;1", "aff_campus_unique": ";Austin", "aff_country_unique_index": "0+1;1;1;1", "aff_country_unique": "China;United States" }, { "title": "Efficient Message Passing for 0\u20131 ILPs with Binary Decision Diagrams", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10673", "id": "10673", "proceeding": "http://proceedings.mlr.press/v139/lange21a.html", "slides": "", "author_site": "Jan-Hendrik Lange, Paul Swoboda", "author": "Jan-Hendrik Lange; Paul Swoboda", "abstract": "We present a message passing method for 0\u20131 integer linear programs. Our algorithm is based on a decomposition of the original problem into subproblems that are represented as binary decision diagrams. The resulting Lagrangean dual is solved iteratively by a series of efficient block coordinate ascent steps. Our method has linear iteration complexity in the size of the decomposition and can be effectively parallelized. The characteristics of our approach are desirable towards solving ever larger problems arising in structured prediction. We present experimental results on combinatorial problems from MAP inference for Markov Random Fields, quadratic assignment, discrete tomography and cell tracking for developmental biology and show promising performance.", "bibtex": "@InProceedings{pmlr-v139-lange21a,\n title = \t {Efficient Message Passing for 0{\u2013}1 ILPs with Binary Decision Diagrams},\n author = {Lange, Jan-Hendrik and Swoboda, Paul},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6000--6010},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lange21a/lange21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lange21a.html},\n abstract = \t {We present a message passing method for 0{\u2013}1 integer linear programs. Our algorithm is based on a decomposition of the original problem into subproblems that are represented as binary decision diagrams. The resulting Lagrangean dual is solved iteratively by a series of efficient block coordinate ascent steps. Our method has linear iteration complexity in the size of the decomposition and can be effectively parallelized. 
The characteristics of our approach are desirable towards solving ever larger problems arising in structured prediction. We present experimental results on combinatorial problems from MAP inference for Markov Random Fields, quadratic assignment, discrete tomography and cell tracking for developmental biology and show promising performance.}\n}", "pdf": "http://proceedings.mlr.press/v139/lange21a/lange21a.pdf", "supp": "", "pdf_size": 364947, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16792064356860454486&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "University of T\u00fcbingen, Germany; Max Planck Institute for Informatics, Saarbr\u00fccken, Germany", "aff_domain": "uni-tuebingen.de;mpi-inf.mpg.de", "email": "uni-tuebingen.de;mpi-inf.mpg.de", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/lange21a.html", "aff_unique_index": "0;1", "aff_unique_norm": "University of T\u00fcbingen;Max Planck Institute for Informatics", "aff_unique_dep": ";", "aff_unique_url": "https://www.uni-tuebingen.de/;https://mpi-inf.mpg.de", "aff_unique_abbr": "Uni T\u00fcbingen;MPII", "aff_campus_unique_index": "1", "aff_campus_unique": ";Saarbr\u00fccken", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "title": "Efficient Online Learning for Dynamic k-Clustering", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8755", "id": "8755", "proceeding": "http://proceedings.mlr.press/v139/fotakis21a.html", "slides": "/media/icml-2021/Slides/8755.pdf", "author_site": "Dimitris Fotakis, Georgios Piliouras, Stratis Skoulakis", "author": "Dimitris Fotakis; Georgios Piliouras; Stratis Skoulakis", "abstract": "In this work, we study dynamic clustering problems from the perspective of online learning. We consider an online learning problem, called \\textit{Dynamic $k$-Clustering}, in which $k$ centers are maintained in a metric space over time (centers may change positions) such as a dynamically changing set of $r$ clients is served in the best possible way. The connection cost at round $t$ is given by the \\textit{$p$-norm} of the vector formed by the distance of each client to its closest center at round $t$, for some $p\\geq 1$. We design a \\textit{$\\Theta\\left( \\min(k,r) \\right)$-regret} polynomial-time online learning algorithm, while we show that, under some well-established computational complexity conjectures, \\textit{constant-regret} cannot be achieved in polynomial-time. In addition to the efficient solution of Dynamic $k$-Clustering, our work contributes to the long line of research of combinatorial online learning.", "bibtex": "@InProceedings{pmlr-v139-fotakis21a,\n title = \t {Efficient Online Learning for Dynamic k-Clustering},\n author = {Fotakis, Dimitris and Piliouras, Georgios and Skoulakis, Stratis},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3396--3406},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fotakis21a/fotakis21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/fotakis21a.html},\n abstract = \t {In this work, we study dynamic clustering problems from the perspective of online learning. 
We consider an online learning problem, called \\textit{Dynamic $k$-Clustering}, in which $k$ centers are maintained in a metric space over time (centers may change positions) such as a dynamically changing set of $r$ clients is served in the best possible way. The connection cost at round $t$ is given by the \\textit{$p$-norm} of the vector formed by the distance of each client to its closest center at round $t$, for some $p\\geq 1$. We design a \\textit{$\\Theta\\left( \\min(k,r) \\right)$-regret} polynomial-time online learning algorithm, while we show that, under some well-established computational complexity conjectures, \\textit{constant-regret} cannot be achieved in polynomial-time. In addition to the efficient solution of Dynamic $k$-Clustering, our work contributes to the long line of research of combinatorial online learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/fotakis21a/fotakis21a.pdf", "supp": "", "pdf_size": 826119, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4371282169958372875&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Departement of Electrical and Computer Engineering, National Technical University of Athens, Athens, Greece+Pillar of Engineering Systems and Design, Singapore University of Technology and Design, Singapore; Pillar of Engineering Systems and Design, Singapore University of Technology and Design, Singapore; Pillar of Engineering Systems and Design, Singapore University of Technology and Design, Singapore", "aff_domain": "cs.ntua.gr;sutd.edu.sg;sutd.edu.sg", "email": "cs.ntua.gr;sutd.edu.sg;sutd.edu.sg", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/fotakis21a.html", "aff_unique_index": "0+1;1;1", "aff_unique_norm": "National Technical University of Athens;Singapore University of Technology and Design", "aff_unique_dep": "Departement of Electrical and Computer Engineering;Pillar of Engineering Systems and Design", "aff_unique_url": "https://www.ntua.gr;https://www.sutd.edu.sg", "aff_unique_abbr": "NTUA;SUTD", "aff_campus_unique_index": "0", "aff_campus_unique": "Athens;", "aff_country_unique_index": "0+1;1;1", "aff_country_unique": "Greece;Singapore" }, { "title": "Efficient Performance Bounds for Primal-Dual Reinforcement Learning from Demonstrations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9901", "id": "9901", "proceeding": "http://proceedings.mlr.press/v139/kamoutsi21a.html", "slides": "", "author_site": "Angeliki Kamoutsi, Goran Banjac, John Lygeros", "author": "Angeliki Kamoutsi; Goran Banjac; John Lygeros", "abstract": "We consider large-scale Markov decision processes with an unknown cost function and address the problem of learning a policy from a finite set of expert demonstrations. \t\t\tWe assume that the learner is not allowed to interact with the expert and has no access to reinforcement signal of any kind. \t\t\tExisting inverse reinforcement learning methods come with strong theoretical guarantees, but are computationally expensive, while state-of-the-art policy optimization algorithms achieve significant empirical success, but are hampered by limited theoretical understanding. \t\t\tTo bridge the gap between theory and practice, we introduce a novel bilinear saddle-point framework using Lagrangian duality. \t\t\tThe proposed primal-dual viewpoint allows us to develop a model-free provably efficient algorithm through the lens of stochastic convex optimization. 
The method enjoys the advantages of simplicity of implementation, low memory requirements, and computational and sample complexities independent of the number of states. We further present an equivalent no-regret online-learning interpretation.", "bibtex": "@InProceedings{pmlr-v139-kamoutsi21a,\n title = \t {Efficient Performance Bounds for Primal-Dual Reinforcement Learning from Demonstrations},\n author = {Kamoutsi, Angeliki and Banjac, Goran and Lygeros, John},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5257--5268},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kamoutsi21a/kamoutsi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kamoutsi21a.html},\n abstract = \t {We consider large-scale Markov decision processes with an unknown cost function and address the problem of learning a policy from a finite set of expert demonstrations. \t\t\tWe assume that the learner is not allowed to interact with the expert and has no access to reinforcement signal of any kind. \t\t\tExisting inverse reinforcement learning methods come with strong theoretical guarantees, but are computationally expensive, while state-of-the-art policy optimization algorithms achieve significant empirical success, but are hampered by limited theoretical understanding. \t\t\tTo bridge the gap between theory and practice, we introduce a novel bilinear saddle-point framework using Lagrangian duality. \t\t\tThe proposed primal-dual viewpoint allows us to develop a model-free provably efficient algorithm through the lens of stochastic convex optimization. The method enjoys the advantages of simplicity of implementation, low memory requirements, and computational and sample complexities independent of the number of states. 
We further present an equivalent no-regret online-learning interpretation.}\n}", "pdf": "http://proceedings.mlr.press/v139/kamoutsi21a/kamoutsi21a.pdf", "supp": "", "pdf_size": 435822, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5269503921418241366&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Automatic Control Laboratory, ETH Zurich, Switzerland; Automatic Control Laboratory, ETH Zurich, Switzerland; Automatic Control Laboratory, ETH Zurich, Switzerland", "aff_domain": "ethz.ch; ; ", "email": "ethz.ch; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/kamoutsi21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "Automatic Control Laboratory", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETHZ", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Switzerland" }, { "title": "Efficient Statistical Tests: A Neural Tangent Kernel Approach", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9791", "id": "9791", "proceeding": "http://proceedings.mlr.press/v139/jia21a.html", "slides": "/media/icml-2021/Slides/9791.pdf", "author_site": "Sheng Jia, Ehsan Nezhadarya, Yuhuai Wu, Jimmy Ba", "author": "Sheng Jia; Ehsan Nezhadarya; Yuhuai Wu; Jimmy Ba", "abstract": "For machine learning models to make reliable predictions in deployment, one needs to ensure the previously unknown test samples need to be sufficiently similar to the training data. The commonly used shift-invariant kernels do not have the compositionality and fail to capture invariances in high-dimensional data in computer vision. We propose a shift-invariant convolutional neural tangent kernel (SCNTK) based outlier detector and two-sample tests with maximum mean discrepancy (MMD) that is O(n) in the number of samples due to using the random feature approximation. On MNIST and CIFAR10 with various types of dataset shifts, we empirically show that statistical tests with such compositional kernels, inherited from infinitely wide neural networks, achieve higher detection accuracy than existing non-parametric methods. Our method also provides a competitive alternative to adapted kernel methods that require a training phase.", "bibtex": "@InProceedings{pmlr-v139-jia21a,\n title = \t {Efficient Statistical Tests: A Neural Tangent Kernel Approach},\n author = {Jia, Sheng and Nezhadarya, Ehsan and Wu, Yuhuai and Ba, Jimmy},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4893--4903},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jia21a/jia21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jia21a.html},\n abstract = \t {For machine learning models to make reliable predictions in deployment, one needs to ensure the previously unknown test samples need to be sufficiently similar to the training data. The commonly used shift-invariant kernels do not have the compositionality and fail to capture invariances in high-dimensional data in computer vision. 
We propose a shift-invariant convolutional neural tangent kernel (SCNTK) based outlier detector and two-sample tests with maximum mean discrepancy (MMD) that is O(n) in the number of samples due to using the random feature approximation. On MNIST and CIFAR10 with various types of dataset shifts, we empirically show that statistical tests with such compositional kernels, inherited from infinitely wide neural networks, achieve higher detection accuracy than existing non-parametric methods. Our method also provides a competitive alternative to adapted kernel methods that require a training phase.}\n}", "pdf": "http://proceedings.mlr.press/v139/jia21a/jia21a.pdf", "supp": "", "pdf_size": 3728129, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12219088238081368921&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "University of Toronto + Vector Institute; LG Electronics; University of Toronto + Vector Institute; University of Toronto + Vector Institute", "aff_domain": "cs.toronto.edu; ;utoronto.ca;cs.toronto.edu", "email": "cs.toronto.edu; ;utoronto.ca;cs.toronto.edu", "github": "https://github.com/Sheng-J/scntk", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/jia21a.html", "aff_unique_index": "0+1;2;0+1;0+1", "aff_unique_norm": "University of Toronto;Vector Institute;LG", "aff_unique_dep": ";;LG Electronics", "aff_unique_url": "https://www.utoronto.ca;https://vectorinstitute.ai/;https://www.lg.com", "aff_unique_abbr": "U of T;Vector Institute;LG", "aff_campus_unique_index": ";;", "aff_campus_unique": "", "aff_country_unique_index": "0+0;1;0+0;0+0", "aff_country_unique": "Canada;South Korea" }, { "title": "Efficient Training of Robust Decision Trees Against Adversarial Examples", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10209", "id": "10209", "proceeding": "http://proceedings.mlr.press/v139/vos21a.html", "slides": "/media/icml-2021/Slides/10209.pdf", "author_site": "Dani\u00ebl Vos, Sicco Verwer", "author": "Dani\u00ebl Vos; Sicco Verwer", "abstract": "Current state-of-the-art algorithms for training robust decision trees have high runtime costs and require hours to run. We present GROOT, an efficient algorithm for training robust decision trees and random forests that runs in a matter of seconds to minutes. Where before the worst-case Gini impurity was computed iteratively, we find that we can solve this function analytically to improve time complexity from O(n) to O(1) in terms of n samples. 
Our results on both single trees and ensembles on 14 structured datasets as well as on MNIST and Fashion-MNIST demonstrate that GROOT runs several orders of magnitude faster than the state-of-the-art works and also shows better performance in terms of adversarial accuracy on structured data.", "bibtex": "@InProceedings{pmlr-v139-vos21a,\n title = \t {Efficient Training of Robust Decision Trees Against Adversarial Examples},\n author = {Vos, Dani{\\\"e}l and Verwer, Sicco},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10586--10595},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/vos21a/vos21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/vos21a.html},\n abstract = \t {Current state-of-the-art algorithms for training robust decision trees have high runtime costs and require hours to run. We present GROOT, an efficient algorithm for training robust decision trees and random forests that runs in a matter of seconds to minutes. Where before the worst-case Gini impurity was computed iteratively, we find that we can solve this function analytically to improve time complexity from O(n) to O(1) in terms of n samples. Our results on both single trees and ensembles on 14 structured datasets as well as on MNIST and Fashion-MNIST demonstrate that GROOT runs several orders of magnitude faster than the state-of-the-art works and also shows better performance in terms of adversarial accuracy on structured data.}\n}", "pdf": "http://proceedings.mlr.press/v139/vos21a/vos21a.pdf", "supp": "", "pdf_size": 799495, "gs_citation": 57, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9227298780298647203&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Cyber Security Group, Delft University of Technology; Cyber Security Group, Delft University of Technology", "aff_domain": "tudelft.nl; ", "email": "tudelft.nl; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/vos21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Delft University of Technology", "aff_unique_dep": "Cyber Security Group", "aff_unique_url": "https://www.tudelft.nl", "aff_unique_abbr": "TUDelft", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Netherlands" }, { "title": "EfficientNetV2: Smaller Models and Faster Training", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8849", "id": "8849", "proceeding": "http://proceedings.mlr.press/v139/tan21a.html", "slides": "", "author_site": "Mingxing Tan, Quoc Le", "author": "Mingxing Tan; Quoc Le", "abstract": "This paper introduces EfficientNetV2, a new family of convolutional networks that have faster training speed and better parameter efficiency than previous models. To develop these models, we use a combination of training-aware neural architecture search and scaling, to jointly optimize training speed and parameter efficiency. The models were searched from the search space enriched with new ops such as Fused-MBConv. Our experiments show that EfficientNetV2 models train much faster than state-of-the-art models while being up to 6.8x smaller. Our training can be further sped up by progressively increasing the image size during training, but it often causes a drop in accuracy. 
To compensate for this accuracy drop, we propose an improved method of progressive learning, which adaptively adjusts regularization (e.g. data augmentation) along with image size. With progressive learning, our EfficientNetV2 significantly outperforms previous models on ImageNet and CIFAR/Cars/Flowers datasets. By pretraining on the same ImageNet21k, our EfficientNetV2 achieves 87.3% top-1 accuracy on ImageNet ILSVRC2012, outperforming the recent ViT by 2.0% accuracy while training 5x-11x faster using the same computing resources.", "bibtex": "@InProceedings{pmlr-v139-tan21a,\n title = \t {EfficientNetV2: Smaller Models and Faster Training},\n author = {Tan, Mingxing and Le, Quoc},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10096--10106},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tan21a/tan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/tan21a.html},\n abstract = \t {This paper introduces EfficientNetV2, a new family of convolutional networks that have faster training speed and better parameter efficiency than previous models. To develop these models, we use a combination of training-aware neural architecture search and scaling, to jointly optimize training speed and parameter efficiency. The models were searched from the search space enriched with new ops such as Fused-MBConv. Our experiments show that EfficientNetV2 models train much faster than state-of-the-art models while being up to 6.8x smaller. Our training can be further sped up by progressively increasing the image size during training, but it often causes a drop in accuracy. To compensate for this accuracy drop, we propose an improved method of progressive learning, which adaptively adjusts regularization (e.g. data augmentation) along with image size. With progressive learning, our EfficientNetV2 significantly outperforms previous models on ImageNet and CIFAR/Cars/Flowers datasets. 
By pretraining on the same ImageNet21k, our EfficientNetV2 achieves 87.3% top-1 accuracy on ImageNet ILSVRC2012, outperforming the recent ViT by 2.0% accuracy while training 5x-11x faster using the same computing resources.}\n}", "pdf": "http://proceedings.mlr.press/v139/tan21a/tan21a.pdf", "supp": "", "pdf_size": 636328, "gs_citation": 4128, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11531745608575458402&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Google Research, Brain Team; Google Research, Brain Team", "aff_domain": "google.com; ", "email": "google.com; ", "github": "https://github.com/google/automl/tree/master/efficientnetv2", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/tan21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "EfficientTTS: An Efficient and High-Quality Text-to-Speech Architecture", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8945", "id": "8945", "proceeding": "http://proceedings.mlr.press/v139/miao21a.html", "slides": "/media/icml-2021/Slides/8945.pdf", "author_site": "Chenfeng Miao, Liang Shuang, Zhengchen Liu, Chen Minchuan, Jun Ma, Shaojun Wang, Jing Xiao", "author": "Chenfeng Miao; Liang Shuang; Zhengchen Liu; Chen Minchuan; Jun Ma; Shaojun Wang; Jing Xiao", "abstract": "In this work, we address the Text-to-Speech (TTS) task by proposing a non-autoregressive architecture called EfficientTTS. Unlike the dominant non-autoregressive TTS models, which are trained with the need of external aligners, EfficientTTS optimizes all its parameters with a stable, end-to-end training procedure, allowing for synthesizing high quality speech in a fast and efficient manner. EfficientTTS is motivated by a new monotonic alignment modeling approach, which specifies monotonic constraints to the sequence alignment with almost no increase of computation. By combining EfficientTTS with different feed-forward network structures, we develop a family of TTS models, including both text-to-melspectrogram and text-to-waveform networks. We experimentally show that the proposed models significantly outperform counterpart models such as Tacotron 2 and Glow-TTS in terms of speech quality, training efficiency and synthesis speed, while still producing the speeches of strong robustness and great diversity. 
In addition, we demonstrate that the proposed approach can be easily extended to autoregressive models such as Tacotron 2.", "bibtex": "@InProceedings{pmlr-v139-miao21a,\n title = \t {EfficientTTS: An Efficient and High-Quality Text-to-Speech Architecture},\n author = {Miao, Chenfeng and Shuang, Liang and Liu, Zhengchen and Minchuan, Chen and Ma, Jun and Wang, Shaojun and Xiao, Jing},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7700--7709},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/miao21a/miao21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/miao21a.html},\n abstract = \t {In this work, we address the Text-to-Speech (TTS) task by proposing a non-autoregressive architecture called EfficientTTS. Unlike the dominant non-autoregressive TTS models, which are trained with the need of external aligners, EfficientTTS optimizes all its parameters with a stable, end-to-end training procedure, allowing for synthesizing high quality speech in a fast and efficient manner. EfficientTTS is motivated by a new monotonic alignment modeling approach, which specifies monotonic constraints to the sequence alignment with almost no increase of computation. By combining EfficientTTS with different feed-forward network structures, we develop a family of TTS models, including both text-to-melspectrogram and text-to-waveform networks. We experimentally show that the proposed models significantly outperform counterpart models such as Tacotron 2 and Glow-TTS in terms of speech quality, training efficiency and synthesis speed, while still producing the speeches of strong robustness and great diversity. 
In addition, we demonstrate that the proposed approach can be easily extended to autoregressive models such as Tacotron 2.}\n}", "pdf": "http://proceedings.mlr.press/v139/miao21a/miao21a.pdf", "supp": "", "pdf_size": 1781081, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2294346178605077338&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Ping An Technology; Ping An Technology; Ping An Technology; Ping An Technology; Ping An Technology; Ping An Technology; Ping An Technology", "aff_domain": "126.com; ; ; ; ; ; ", "email": "126.com; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/miao21a.html", "aff_unique_index": "0;0;0;0;0;0;0", "aff_unique_norm": "Ping An Technology", "aff_unique_dep": "", "aff_unique_url": "https://www.pingan.com", "aff_unique_abbr": "Ping An", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "China" }, { "title": "Elastic Graph Neural Networks", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9583", "id": "9583", "proceeding": "http://proceedings.mlr.press/v139/liu21k.html", "slides": "/media/icml-2021/Slides/9583.pdf", "author_site": "Xiaorui Liu, Wei Jin, Yao Ma, Yaxin Li, Hua Liu, Yiqi Wang, Ming Yan, Jiliang Tang", "author": "Xiaorui Liu; Wei Jin; Yao Ma; Yaxin Li; Hua Liu; Yiqi Wang; Ming Yan; Jiliang Tang", "abstract": "While many existing graph neural networks (GNNs) have been proven to perform $\ell_2$-based graph smoothing that enforces smoothness globally, in this work we aim to further enhance the local smoothness adaptivity of GNNs via $\ell_1$-based graph smoothing. As a result, we introduce a family of GNNs (Elastic GNNs) based on $\ell_1$ and $\ell_2$-based graph smoothing. In particular, we propose a novel and general message passing scheme into GNNs. 
This message passing algorithm is not only friendly to back-propagation training but also achieves the desired smoothing properties with a theoretical convergence guarantee. Experiments on semi-supervised learning tasks demonstrate that the proposed Elastic GNNs obtain better adaptivity on benchmark datasets and are significantly robust to graph adversarial attacks. The implementation of Elastic GNNs is available at \\url{https://github.com/lxiaorui/ElasticGNN}.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21k/liu21k.pdf", "supp": "", "pdf_size": 1218486, "gs_citation": 156, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7978714464929950404&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 9, "aff": "Department of Computer Science and Engineering, Michigan State University, USA; Department of Computer Science and Engineering, Michigan State University, USA; Department of Computer Science and Engineering, Michigan State University, USA; Department of Computer Science and Engineering, Michigan State University, USA; School of Mathematics, Shandong University, China; Department of Computer Science and Engineering, Michigan State University, USA; Department of Computational Mathematics, Science and Engineering, Michigan State University, USA; Department of Computer Science and Engineering, Michigan State University, USA", "aff_domain": "msu.com; ; ; ; ; ; ; ", "email": "msu.com; ; ; ; ; ; ; ", "github": "https://github.com/lxiaorui/ElasticGNN", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/liu21k.html", "aff_unique_index": "0;0;0;0;1;0;0;0", "aff_unique_norm": "Michigan State University;Shandong University", "aff_unique_dep": "Department of Computer Science and Engineering;School of Mathematics", "aff_unique_url": "https://www.msu.edu;http://www.sdu.edu.cn", "aff_unique_abbr": "MSU;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;1;0;0;0", "aff_country_unique": "United States;China" }, { "title": "Elementary superexpressive activations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10131", "id": "10131", "proceeding": "http://proceedings.mlr.press/v139/yarotsky21a.html", "slides": "/media/icml-2021/Slides/10131.pdf", "author": "Dmitry Yarotsky", "abstract": "We call a finite family of activation functions \\emph{superexpressive} if any multivariate continuous function can be approximated by a neural network that uses these activations and has a fixed architecture only depending on the number of input variables (i.e., to achieve any accuracy we only need to adjust the weights, without increasing the number of neurons). Previously, it was known that superexpressive activations exist, but their form was quite complex. We give examples of very simple superexpressive families: for example, we prove that the family $\\{sin, arcsin\\}$ is superexpressive. 
We also show that most practical activations (not involving periodic functions) are not superexpressive.", "bibtex": "@InProceedings{pmlr-v139-yarotsky21a,\n title = \t {Elementary superexpressive activations},\n author = {Yarotsky, Dmitry},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11932--11940},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yarotsky21a/yarotsky21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yarotsky21a.html},\n abstract = \t {We call a finite family of activation functions \\emph{superexpressive} if any multivariate continuous function can be approximated by a neural network that uses these activations and has a fixed architecture only depending on the number of input variables (i.e., to achieve any accuracy we only need to adjust the weights, without increasing the number of neurons). Previously, it was known that superexpressive activations exist, but their form was quite complex. We give examples of very simple superexpressive families: for example, we prove that the family $\\{sin, arcsin\\}$ is superexpressive. We also show that most practical activations (not involving periodic functions) are not superexpressive.}\n}", "pdf": "http://proceedings.mlr.press/v139/yarotsky21a/yarotsky21a.pdf", "supp": "", "pdf_size": 308027, "gs_citation": 41, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17523233965006145993&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Skolkovo Institute of Science and Technology, Moscow, Russia", "aff_domain": "skoltech.ru", "email": "skoltech.ru", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v139/yarotsky21a.html", "aff_unique_index": "0", "aff_unique_norm": "Skolkovo Institute of Science and Technology", "aff_unique_dep": "", "aff_unique_url": "https://www.skoltech.ru", "aff_unique_abbr": "Skoltech", "aff_campus_unique_index": "0", "aff_campus_unique": "Moscow", "aff_country_unique_index": "0", "aff_country_unique": "Russian Federation" }, { "title": "Emergent Social Learning via Multi-agent Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9105", "id": "9105", "proceeding": "http://proceedings.mlr.press/v139/ndousse21a.html", "slides": "", "author_site": "Kamal Ndousse, Douglas Eck, Sergey Levine, Natasha Jaques", "author": "Kamal K Ndousse; Douglas Eck; Sergey Levine; Natasha Jaques", "abstract": "Social learning is a key component of human and animal intelligence. By taking cues from the behavior of experts in their environment, social learners can acquire sophisticated behavior and rapidly adapt to new circumstances. This paper investigates whether independent reinforcement learning (RL) agents in a multi-agent environment can learn to use social learning to improve their performance. We find that in most circumstances, vanilla model-free RL agents do not use social learning. 
We analyze the reasons for this deficiency, and show that by imposing constraints on the training environment and introducing a model-based auxiliary loss we are able to obtain generalized social learning policies which enable agents to: i) discover complex skills that are not learned from single-agent training, and ii) adapt online to novel environments by taking cues from experts present in the new environment. In contrast, agents trained with model-free RL or imitation learning generalize poorly and do not succeed in the transfer tasks. By mixing multi-agent and solo training, we can obtain agents that use social learning to gain skills that they can deploy when alone, even out-performing agents trained alone from the start.", "bibtex": "@InProceedings{pmlr-v139-ndousse21a,\n title = \t {Emergent Social Learning via Multi-agent Reinforcement Learning},\n author = {Ndousse, Kamal K and Eck, Douglas and Levine, Sergey and Jaques, Natasha},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7991--8004},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ndousse21a/ndousse21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ndousse21a.html},\n abstract = \t {Social learning is a key component of human and animal intelligence. By taking cues from the behavior of experts in their environment, social learners can acquire sophisticated behavior and rapidly adapt to new circumstances. This paper investigates whether independent reinforcement learning (RL) agents in a multi-agent environment can learn to use social learning to improve their performance. We find that in most circumstances, vanilla model-free RL agents do not use social learning. We analyze the reasons for this deficiency, and show that by imposing constraints on the training environment and introducing a model-based auxiliary loss we are able to obtain generalized social learning policies which enable agents to: i) discover complex skills that are not learned from single-agent training, and ii) adapt online to novel environments by taking cues from experts present in the new environment. In contrast, agents trained with model-free RL or imitation learning generalize poorly and do not succeed in the transfer tasks. 
By mixing multi-agent and solo training, we can obtain agents that use social learning to gain skills that they can deploy when alone, even out-performing agents trained alone from the start.}\n}", "pdf": "http://proceedings.mlr.press/v139/ndousse21a/ndousse21a.pdf", "supp": "", "pdf_size": 1323696, "gs_citation": 82, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9351968904862292986&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "OpenAI, San Francisco, CA, USA; Google Research \u2013 Brain team, Mountain View, CA, USA; Google Research \u2013 Brain team, Mountain View, CA, USA + UC Berkeley, Berkeley, CA, USA; Google Research \u2013 Brain team, Mountain View, CA, USA + UC Berkeley, Berkeley, CA, USA", "aff_domain": "gmail.com; ; ; ", "email": "gmail.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/ndousse21a.html", "aff_unique_index": "0;1;1+2;1+2", "aff_unique_norm": "OpenAI;Google;University of California, Berkeley", "aff_unique_dep": ";Google Research \u2013 Brain team;", "aff_unique_url": "https://openai.com;https://research.google;https://www.berkeley.edu", "aff_unique_abbr": "OpenAI;Google;UC Berkeley", "aff_campus_unique_index": "0;1;1+2;1+2", "aff_campus_unique": "San Francisco;Mountain View;Berkeley", "aff_country_unique_index": "0;0;0+0;0+0", "aff_country_unique": "United States" }, { "title": "Emphatic Algorithms for Deep Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9923", "id": "9923", "proceeding": "http://proceedings.mlr.press/v139/jiang21j.html", "slides": "", "author_site": "Ray Jiang, Tom Zahavy, Zhongwen Xu, Adam White, Matteo Hessel, Charles Blundell, Hado van Hasselt", "author": "Ray Jiang; Tom Zahavy; Zhongwen Xu; Adam White; Matteo Hessel; Charles Blundell; Hado Van Hasselt", "abstract": "Off-policy learning allows us to learn about possible policies of behavior from experience generated by a different behavior policy. Temporal difference (TD) learning algorithms can become unstable when combined with function approximation and off-policy sampling\u2014this is known as the \u201cdeadly triad\u201d. Emphatic temporal difference (ETD($\\lambda$)) algorithm ensures convergence in the linear case by appropriately weighting the TD($\\lambda$) updates. In this paper, we extend the use of emphatic methods to deep reinforcement learning agents. We show that naively adapting ETD($\\lambda$) to popular deep reinforcement learning algorithms, which use forward view multi-step returns, results in poor performance. We then derive new emphatic algorithms for use in the context of such algorithms, and we demonstrate that they provide noticeable benefits in small problems designed to highlight the instability of TD methods. 
Finally, we observed improved performance when applying these algorithms at scale on classic Atari games from the Arcade Learning Environment.", "bibtex": "@InProceedings{pmlr-v139-jiang21j,\n title = \t {Emphatic Algorithms for Deep Reinforcement Learning},\n author = {Jiang, Ray and Zahavy, Tom and Xu, Zhongwen and White, Adam and Hessel, Matteo and Blundell, Charles and Van Hasselt, Hado},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5023--5033},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jiang21j/jiang21j.pdf},\n url = \t {https://proceedings.mlr.press/v139/jiang21j.html},\n abstract = \t {Off-policy learning allows us to learn about possible policies of behavior from experience generated by a different behavior policy. Temporal difference (TD) learning algorithms can become unstable when combined with function approximation and off-policy sampling\u2014this is known as the \u201cdeadly triad\u201d. Emphatic temporal difference (ETD($\\lambda$)) algorithm ensures convergence in the linear case by appropriately weighting the TD($\\lambda$) updates. In this paper, we extend the use of emphatic methods to deep reinforcement learning agents. We show that naively adapting ETD($\\lambda$) to popular deep reinforcement learning algorithms, which use forward view multi-step returns, results in poor performance. We then derive new emphatic algorithms for use in the context of such algorithms, and we demonstrate that they provide noticeable benefits in small problems designed to highlight the instability of TD methods. 
Finally, we observed improved performance when applying these algorithms at scale on classic Atari games from the Arcade Learning Environment.}\n}", "pdf": "http://proceedings.mlr.press/v139/jiang21j/jiang21j.pdf", "supp": "", "pdf_size": 2910464, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3863927304903335898&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK + Amii, Department of Computing Science, University of Alberta; DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK", "aff_domain": "google.com; ; ; ; ; ; ", "email": "google.com; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/jiang21j.html", "aff_unique_index": "0;0;0;0+1;0;0;0", "aff_unique_norm": "DeepMind;University of Alberta", "aff_unique_dep": ";Department of Computing Science", "aff_unique_url": "https://deepmind.com;https://www.ualberta.ca", "aff_unique_abbr": "DeepMind;UAlberta", "aff_campus_unique_index": "0;0;0;0;0;0;0", "aff_campus_unique": "London;", "aff_country_unique_index": "0;0;0;0+1;0;0;0", "aff_country_unique": "United Kingdom;Canada" }, { "title": "End-to-End Learning of Coherent Probabilistic Forecasts for Hierarchical Time Series", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10001", "id": "10001", "proceeding": "http://proceedings.mlr.press/v139/rangapuram21a.html", "slides": "", "author_site": "Syama Sundar Yadav Rangapuram, Lucien Werner, Konstantinos Benidis, Pedro Mercado, Jan Gasthaus, Tim Januschowski", "author": "Syama Sundar Rangapuram; Lucien D Werner; Konstantinos Benidis; Pedro Mercado; Jan Gasthaus; Tim Januschowski", "abstract": "This paper presents a novel approach for hierarchical time series forecasting that produces coherent, probabilistic forecasts without requiring any explicit post-processing reconciliation. Unlike the state-of-the-art, the proposed method simultaneously learns from all time series in the hierarchy and incorporates the reconciliation step into a single trainable model. This is achieved by applying the reparameterization trick and casting reconciliation as an optimization problem with a closed-form solution. These model features make end-to-end learning of hierarchical forecasts possible, while accomplishing the challenging task of generating forecasts that are both probabilistic and coherent. Importantly, our approach also accommodates general aggregation constraints including grouped and temporal hierarchies. 
An extensive empirical evaluation on real-world hierarchical datasets demonstrates the advantages of the proposed approach over the state-of-the-art.", "bibtex": "@InProceedings{pmlr-v139-rangapuram21a,\n title = \t {End-to-End Learning of Coherent Probabilistic Forecasts for Hierarchical Time Series},\n author = {Rangapuram, Syama Sundar and Werner, Lucien D and Benidis, Konstantinos and Mercado, Pedro and Gasthaus, Jan and Januschowski, Tim},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8832--8843},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/rangapuram21a/rangapuram21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/rangapuram21a.html},\n abstract = \t {This paper presents a novel approach for hierarchical time series forecasting that produces coherent, probabilistic forecasts without requiring any explicit post-processing reconciliation. Unlike the state-of-the-art, the proposed method simultaneously learns from all time series in the hierarchy and incorporates the reconciliation step into a single trainable model. This is achieved by applying the reparameterization trick and casting reconciliation as an optimization problem with a closed-form solution. These model features make end-to-end learning of hierarchical forecasts possible, while accomplishing the challenging task of generating forecasts that are both probabilistic and coherent. Importantly, our approach also accommodates general aggregation constraints including grouped and temporal hierarchies. An extensive empirical evaluation on real-world hierarchical datasets demonstrates the advantages of the proposed approach over the state-of-the-art.}\n}", "pdf": "http://proceedings.mlr.press/v139/rangapuram21a/rangapuram21a.pdf", "supp": "", "pdf_size": 1470045, "gs_citation": 91, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7600564643668410338&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "AWS AI Labs, Germany; Department of Computing & Mathematical Sciences, California Institute of Technology, Pasadena, California, USA + Work done while at Amazon; AWS AI Labs, Germany; AWS AI Labs, Germany; AWS AI Labs, Germany; AWS AI Labs, Germany", "aff_domain": "amazon.de; ; ; ; ; ", "email": "amazon.de; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/rangapuram21a.html", "aff_unique_index": "0;1+2;0;0;0;0", "aff_unique_norm": "AWS AI Labs;California Institute of Technology;Amazon", "aff_unique_dep": "AI Labs;Department of Computing & Mathematical Sciences;Amazon", "aff_unique_url": "https://aws.amazon.com/research/ai/;https://www.caltech.edu;https://www.amazon.com", "aff_unique_abbr": "AWS AI Labs;Caltech;Amazon", "aff_campus_unique_index": "1", "aff_campus_unique": ";Pasadena", "aff_country_unique_index": "0;1+1;0;0;0;0", "aff_country_unique": "Germany;United States" }, { "title": "Enhancing Robustness of Neural Networks through Fourier Stabilization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10041", "id": "10041", "proceeding": "http://proceedings.mlr.press/v139/raviv21a.html", "slides": "", "author_site": "Netanel Raviv, Aidan Kelley, Minzhe Guo, Yevgeniy Vorobeychik", "author": "Netanel Raviv; Aidan Kelley; Minzhe Guo; Yevgeniy Vorobeychik", "abstract": 
"Despite the considerable success of neural networks in security settings such as malware detection, such models have proved vulnerable to evasion attacks, in which attackers make slight changes to inputs (e.g., malware) to bypass detection. We propose a novel approach, Fourier stabilization, for designing evasion-robust neural networks with binary inputs. This approach, which is complementary to other forms of defense, replaces the weights of individual neurons with robust analogs derived using Fourier analytic tools. The choice of which neurons to stabilize in a neural network is then a combinatorial optimization problem, and we propose several methods for approximately solving it. We provide a formal bound on the per-neuron drop in accuracy due to Fourier stabilization, and experimentally demonstrate the effectiveness of the proposed approach in boosting robustness of neural networks in several detection settings. Moreover, we show that our approach effectively composes with adversarial training.", "bibtex": "@InProceedings{pmlr-v139-raviv21a,\n title = \t {Enhancing Robustness of Neural Networks through Fourier Stabilization},\n author = {Raviv, Netanel and Kelley, Aidan and Guo, Minzhe and Vorobeychik, Yevgeniy},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8880--8889},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/raviv21a/raviv21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/raviv21a.html},\n abstract = \t {Despite the considerable success of neural networks in security settings such as malware detection, such models have proved vulnerable to evasion attacks, in which attackers make slight changes to inputs (e.g., malware) to bypass detection. We propose a novel approach, Fourier stabilization, for designing evasion-robust neural networks with binary inputs. This approach, which is complementary to other forms of defense, replaces the weights of individual neurons with robust analogs derived using Fourier analytic tools. The choice of which neurons to stabilize in a neural network is then a combinatorial optimization problem, and we propose several methods for approximately solving it. We provide a formal bound on the per-neuron drop in accuracy due to Fourier stabilization, and experimentally demonstrate the effectiveness of the proposed approach in boosting robustness of neural networks in several detection settings. Moreover, we show that our approach effectively composes with adversarial training.}\n}", "pdf": "http://proceedings.mlr.press/v139/raviv21a/raviv21a.pdf", "supp": "", "pdf_size": 533886, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5015259452047587361&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science and Engineering, Washington University in St. Louis; Department of Computer Science and Engineering, Washington University in St. Louis; Department of Computer Science and Engineering, Washington University in St. Louis; Department of Computer Science and Engineering, Washington University in St. 
Louis", "aff_domain": "wustl.edu; ; ; ", "email": "wustl.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/raviv21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Washington University in St. Louis", "aff_unique_dep": "Department of Computer Science and Engineering", "aff_unique_url": "https://wustl.edu", "aff_unique_abbr": "WashU", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "St. Louis", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Ensemble Bootstrapping for Q-Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8459", "id": "8459", "proceeding": "http://proceedings.mlr.press/v139/peer21a.html", "slides": "/media/icml-2021/Slides/8459.pdf", "author_site": "Oren Peer, Chen Tessler, Nadav Merlis, Ron Meir", "author": "Oren Peer; Chen Tessler; Nadav Merlis; Ron Meir", "abstract": "Q-learning (QL), a common reinforcement learning algorithm, suffers from over-estimation bias due to the maximization term in the optimal Bellman operator. This bias may lead to sub-optimal behavior. Double-Q-learning tackles this issue by utilizing two estimators, yet results in an under-estimation bias. Similar to over-estimation in Q-learning, in certain scenarios, the under-estimation bias may degrade performance. In this work, we introduce a new bias-reduced algorithm called Ensemble Bootstrapped Q-Learning (EBQL), a natural extension of Double-Q-learning to ensembles. We analyze our method both theoretically and empirically. Theoretically, we prove that EBQL-like updates yield lower MSE when estimating the maximal mean of a set of independent random variables. Empirically, we show that there exist domains where both over and under-estimation result in sub-optimal performance. Finally, We demonstrate the superior performance of a deep RL variant of EBQL over other deep QL algorithms for a suite of ATARI games.", "bibtex": "@InProceedings{pmlr-v139-peer21a,\n title = \t {Ensemble Bootstrapping for Q-Learning},\n author = {Peer, Oren and Tessler, Chen and Merlis, Nadav and Meir, Ron},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8454--8463},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/peer21a/peer21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/peer21a.html},\n abstract = \t {Q-learning (QL), a common reinforcement learning algorithm, suffers from over-estimation bias due to the maximization term in the optimal Bellman operator. This bias may lead to sub-optimal behavior. Double-Q-learning tackles this issue by utilizing two estimators, yet results in an under-estimation bias. Similar to over-estimation in Q-learning, in certain scenarios, the under-estimation bias may degrade performance. In this work, we introduce a new bias-reduced algorithm called Ensemble Bootstrapped Q-Learning (EBQL), a natural extension of Double-Q-learning to ensembles. We analyze our method both theoretically and empirically. Theoretically, we prove that EBQL-like updates yield lower MSE when estimating the maximal mean of a set of independent random variables. Empirically, we show that there exist domains where both over and under-estimation result in sub-optimal performance. 
Finally, we demonstrate the superior performance of a deep RL variant of EBQL over other deep QL algorithms for a suite of ATARI games.}\n}", "pdf": "http://proceedings.mlr.press/v139/peer21a/peer21a.pdf", "supp": "", "pdf_size": 2355581, "gs_citation": 53, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5317000239420464635&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 6, "aff": "Viterbi Faculty of Electrical Engineering, Technion Institute of Technology, Haifa, Israel; Viterbi Faculty of Electrical Engineering, Technion Institute of Technology, Haifa, Israel; Viterbi Faculty of Electrical Engineering, Technion Institute of Technology, Haifa, Israel; Viterbi Faculty of Electrical Engineering, Technion Institute of Technology, Haifa, Israel", "aff_domain": "campus.technion.ac.il; ; ; ", "email": "campus.technion.ac.il; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/peer21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Technion Institute of Technology", "aff_unique_dep": "Viterbi Faculty of Electrical Engineering", "aff_unique_url": "https://www.technion.ac.il", "aff_unique_abbr": "Technion", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Haifa", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Israel" }, { "title": "Environment Inference for Invariant Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10603", "id": "10603", "proceeding": "http://proceedings.mlr.press/v139/creager21a.html", "slides": "", "author_site": "Elliot Creager, Joern-Henrik Jacobsen, Richard Zemel", "author": "Elliot Creager; Joern-Henrik Jacobsen; Richard Zemel", "abstract": "Learning models that gracefully handle distribution shifts is central to research on domain generalization, robust optimization, and fairness. A promising formulation is domain-invariant learning, which identifies the key issue of learning which features are domain-specific versus domain-invariant. An important assumption in this area is that the training examples are partitioned into \u201cdomains\u201d or \u201cenvironments\u201d. Our focus is on the more common setting where such partitions are not provided. We propose EIIL, a general framework for domain-invariant learning that incorporates Environment Inference to directly infer partitions that are maximally informative for downstream Invariant Learning. We show that EIIL outperforms invariant learning methods on the CMNIST benchmark without using environment labels, and significantly outperforms ERM on worst-group performance in the Waterbirds dataset. 
Finally, we establish connections between EIIL and algorithmic fairness, which enables EIIL to improve accuracy and calibration in a fair prediction problem.", "bibtex": "@InProceedings{pmlr-v139-creager21a,\n title = \t {Environment Inference for Invariant Learning},\n author = {Creager, Elliot and Jacobsen, Joern-Henrik and Zemel, Richard},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2189--2200},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/creager21a/creager21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/creager21a.html},\n abstract = \t {Learning models that gracefully handle distribution shifts is central to research on domain generalization, robust optimization, and fairness. A promising formulation is domain-invariant learning, which identifies the key issue of learning which features are domain-specific versus domain-invariant. An important assumption in this area is that the training examples are partitioned into \u201cdomains\u201d or \u201cenvironments\u201d. Our focus is on the more common setting where such partitions are not provided. We propose EIIL, a general framework for domain-invariant learning that incorporates Environment Inference to directly infer partitions that are maximally informative for downstream Invariant Learning. We show that EIIL outperforms invariant learning methods on the CMNIST benchmark without using environment labels, and significantly outperforms ERM on worst-group performance in the Waterbirds dataset. Finally, we establish connections between EIIL and algorithmic fairness, which enables EIIL to improve accuracy and calibration in a fair prediction problem.}\n}", "pdf": "http://proceedings.mlr.press/v139/creager21a/creager21a.pdf", "supp": "", "pdf_size": 1477463, "gs_citation": 460, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7012730739761324020&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "University of Toronto+Vector Institute; University of Toronto+Vector Institute; University of Toronto+Vector Institute", "aff_domain": "cs.toronto.edu; ; ", "email": "cs.toronto.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/creager21a.html", "aff_unique_index": "0+1;0+1;0+1", "aff_unique_norm": "University of Toronto;Vector Institute", "aff_unique_dep": ";", "aff_unique_url": "https://www.utoronto.ca;https://vectorinstitute.ai/", "aff_unique_abbr": "U of T;Vector Institute", "aff_campus_unique_index": ";;", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0+0;0+0", "aff_country_unique": "Canada" }, { "title": "Equivariant Learning of Stochastic Fields: Gaussian Processes and Steerable Conditional Neural Processes", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9935", "id": "9935", "proceeding": "http://proceedings.mlr.press/v139/holderrieth21a.html", "slides": "", "author_site": "Peter Holderrieth, Michael Hutchinson, Yee-Whye Teh", "author": "Peter Holderrieth; Michael J Hutchinson; Yee Whye Teh", "abstract": "Motivated by objects such as electric fields or fluid streams, we study the problem of learning stochastic fields, i.e. stochastic processes whose samples are fields like those occurring in physics and engineering. 
Considering general transformations such as rotations and reflections, we show that spatial invariance of stochastic fields requires an inference model to be equivariant. Leveraging recent advances from the equivariance literature, we study equivariance in two classes of models. Firstly, we fully characterise equivariant Gaussian processes. Secondly, we introduce Steerable Conditional Neural Processes (SteerCNPs), a new, fully equivariant member of the Neural Process family. In experiments with Gaussian process vector fields, images, and real-world weather data, we observe that SteerCNPs significantly improve the performance of previous models and equivariance leads to improvements in transfer learning tasks.", "bibtex": "@InProceedings{pmlr-v139-holderrieth21a,\n title = \t {Equivariant Learning of Stochastic Fields: Gaussian Processes and Steerable Conditional Neural Processes},\n author = {Holderrieth, Peter and Hutchinson, Michael J and Teh, Yee Whye},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4297--4307},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/holderrieth21a/holderrieth21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/holderrieth21a.html},\n abstract = \t {Motivated by objects such as electric fields or fluid streams, we study the problem of learning stochastic fields, i.e. stochastic processes whose samples are fields like those occurring in physics and engineering. Considering general transformations such as rotations and reflections, we show that spatial invariance of stochastic fields requires an inference model to be equivariant. Leveraging recent advances from the equivariance literature, we study equivariance in two classes of models. Firstly, we fully characterise equivariant Gaussian processes. Secondly, we introduce Steerable Conditional Neural Processes (SteerCNPs), a new, fully equivariant member of the Neural Process family. 
In experiments with Gaussian process vector fields, images, and real-world weather data, we observe that SteerCNPs significantly improve the performance of previous models and equivariance leads to improvements in transfer learning tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/holderrieth21a/holderrieth21a.pdf", "supp": "", "pdf_size": 1604056, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12538236800312580419&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "University of Oxford; University of Oxford; University of Oxford + DeepMind", "aff_domain": "new.ox.ac.uk; ; ", "email": "new.ox.ac.uk; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/holderrieth21a.html", "aff_unique_index": "0;0;0+1", "aff_unique_norm": "University of Oxford;DeepMind", "aff_unique_dep": ";", "aff_unique_url": "https://www.ox.ac.uk;https://deepmind.com", "aff_unique_abbr": "Oxford;DeepMind", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0+0", "aff_country_unique": "United Kingdom" }, { "title": "Equivariant Networks for Pixelized Spheres", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10393", "id": "10393", "proceeding": "http://proceedings.mlr.press/v139/shakerinava21a.html", "slides": "/media/icml-2021/Slides/10393.pdf", "author_site": "Mehran Shakerinava, Siamak Ravanbakhsh", "author": "Mehran Shakerinava; Siamak Ravanbakhsh", "abstract": "Pixelizations of Platonic solids such as the cube and icosahedron have been widely used to represent spherical data, from climate records to Cosmic Microwave Background maps. Platonic solids have well-known global symmetries. Once we pixelize each face of the solid, each face also possesses its own local symmetries in the form of Euclidean isometries. One way to combine these symmetries is through a hierarchy. However, this approach does not adequately model the interplay between the two levels of symmetry transformations. We show how to model this interplay using ideas from group theory, identify the equivariant linear maps, and introduce equivariant padding that respects these symmetries. Deep networks that use these maps as their building blocks generalize gauge equivariant CNNs on pixelized spheres. These deep networks achieve state-of-the-art results on semantic segmentation for climate data and omnidirectional image processing. Code is available at https://git.io/JGiZA.", "bibtex": "@InProceedings{pmlr-v139-shakerinava21a,\n title = \t {Equivariant Networks for Pixelized Spheres},\n author = {Shakerinava, Mehran and Ravanbakhsh, Siamak},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9477--9488},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/shakerinava21a/shakerinava21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/shakerinava21a.html},\n abstract = \t {Pixelizations of Platonic solids such as the cube and icosahedron have been widely used to represent spherical data, from climate records to Cosmic Microwave Background maps. Platonic solids have well-known global symmetries. Once we pixelize each face of the solid, each face also possesses its own local symmetries in the form of Euclidean isometries. 
One way to combine these symmetries is through a hierarchy. However, this approach does not adequately model the interplay between the two levels of symmetry transformations. We show how to model this interplay using ideas from group theory, identify the equivariant linear maps, and introduce equivariant padding that respects these symmetries. Deep networks that use these maps as their building blocks generalize gauge equivariant CNNs on pixelized spheres. These deep networks achieve state-of-the-art results on semantic segmentation for climate data and omnidirectional image processing. Code is available at https://git.io/JGiZA.}\n}", "pdf": "http://proceedings.mlr.press/v139/shakerinava21a/shakerinava21a.pdf", "supp": "", "pdf_size": 3286365, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5376976119268774088&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "School of Computer Science, McGill University, Montreal, Canada + Mila - Quebec AI Institute; School of Computer Science, McGill University, Montreal, Canada + Mila - Quebec AI Institute", "aff_domain": "mila.quebec; ", "email": "mila.quebec; ", "github": "https://git.io/JGiZA", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/shakerinava21a.html", "aff_unique_index": "0+1;0+1", "aff_unique_norm": "McGill University;Quebec AI Institute", "aff_unique_dep": "School of Computer Science;AI Institute", "aff_unique_url": "https://www.mcgill.ca;https://mila.quebec", "aff_unique_abbr": "McGill;Mila", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Montreal;", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "Canada" }, { "title": "Equivariant message passing for the prediction of tensorial properties and molecular spectra", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8499", "id": "8499", "proceeding": "http://proceedings.mlr.press/v139/schutt21a.html", "slides": "/media/icml-2021/Slides/8499.pdf", "author_site": "Kristof T Sch\u00fctt, Oliver Unke, Michael Gastegger", "author": "Kristof Sch\u00fctt; Oliver Unke; Michael Gastegger", "abstract": "Message passing neural networks have become a method of choice for learning on graphs, in particular the prediction of chemical properties and the acceleration of molecular dynamics studies. While they readily scale to large training data sets, previous approaches have proven to be less data efficient than kernel methods. We identify limitations of invariant representations as a major reason and extend the message passing formulation to rotationally equivariant representations. On this basis, we propose the polarizable atom interaction neural network (PaiNN) and improve on common molecule benchmarks over previous networks, while reducing model size and inference time. We leverage the equivariant atomwise representations obtained by PaiNN for the prediction of tensorial properties. 
Finally, we apply this to the simulation of molecular spectra, achieving speedups of 4-5 orders of magnitude compared to the electronic structure reference.", "bibtex": "@InProceedings{pmlr-v139-schutt21a,\n title = \t {Equivariant message passing for the prediction of tensorial properties and molecular spectra},\n author = {Sch{\\\"u}tt, Kristof and Unke, Oliver and Gastegger, Michael},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9377--9388},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/schutt21a/schutt21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/schutt21a.html},\n abstract = \t {Message passing neural networks have become a method of choice for learning on graphs, in particular the prediction of chemical properties and the acceleration of molecular dynamics studies. While they readily scale to large training data sets, previous approaches have proven to be less data efficient than kernel methods. We identify limitations of invariant representations as a major reason and extend the message passing formulation to rotationally equivariant representations. On this basis, we propose the polarizable atom interaction neural network (PaiNN) and improve on common molecule benchmarks over previous networks, while reducing model size and inference time. We leverage the equivariant atomwise representations obtained by PaiNN for the prediction of tensorial properties. Finally, we apply this to the simulation of molecular spectra, achieving speedups of 4-5 orders of magnitude compared to the electronic structure reference.}\n}", "pdf": "http://proceedings.mlr.press/v139/schutt21a/schutt21a.pdf", "supp": "", "pdf_size": 1143388, "gs_citation": 724, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17080060601890627793&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Machine Learning Group, Technische Universit\u00e4t Berlin, 10587 Berlin, Germany+Berlin Institute for the Foundations of Learning and Data, 10587 Berlin, Germany+BASLEARN \u2013 TU Berlin / BASF Joint Lab for Machine Learning, 10587 Berlin, Germany; Machine Learning Group, Technische Universit\u00e4t Berlin, 10587 Berlin, Germany+Berlin Institute for the Foundations of Learning and Data, 10587 Berlin, Germany; Machine Learning Group, Technische Universit\u00e4t Berlin, 10587 Berlin, Germany+BASLEARN \u2013 TU Berlin / BASF Joint Lab for Machine Learning, 10587 Berlin, Germany", "aff_domain": "tu-berlin.de; ;tu-berlin.de", "email": "tu-berlin.de; ;tu-berlin.de", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/schutt21a.html", "aff_unique_index": "0+1+2;0+1;0+2", "aff_unique_norm": "Technische Universit\u00e4t Berlin;Berlin Institute for the Foundations of Learning and Data;Technical University of Berlin", "aff_unique_dep": "Machine Learning Group;;BASLEARN \u2013 TU Berlin / BASF Joint Lab for Machine Learning", "aff_unique_url": "https://www.tu-berlin.de;;https://www.tu-berlin.de", "aff_unique_abbr": "TU Berlin;;TU Berlin", "aff_campus_unique_index": "0+0+0;0+0;0+0", "aff_campus_unique": "Berlin", "aff_country_unique_index": "0+0+0;0+0;0+0", "aff_country_unique": "Germany" }, { "title": "Estimating $α$-Rank from A Few Entries with Low Rank Matrix Completion", "status": "Spotlight", "track": "main", 
"site": "https://icml.cc/virtual/2021/poster/10661", "id": "10661", "proceeding": "http://proceedings.mlr.press/v139/du21e.html", "slides": "/media/icml-2021/Slides/10661.pdf", "author_site": "Yali Du, Xue Yan, Xu Chen, Jun Wang, Haifeng Zhang", "author": "Yali Du; Xue Yan; Xu Chen; Jun Wang; Haifeng Zhang", "abstract": "Multi-agent evaluation aims at the assessment of an agent\u2019s strategy on the basis of interaction with others. Typically, existing methods such as $\\alpha$-rank and its approximation still require to exhaustively compare all pairs of joint strategies for an accurate ranking, which in practice is computationally expensive. In this paper, we aim to reduce the number of pairwise comparisons in recovering a satisfying ranking for $n$ strategies in two-player meta-games, by exploring the fact that agents with similar skills may achieve similar payoffs against others. Two situations are considered: the first one is when we can obtain the true payoffs; the other one is when we can only access noisy payoff. Based on these formulations, we leverage low-rank matrix completion and design two novel algorithms for noise-free and noisy evaluations respectively. For both of these settings, we theorize that $O(nr \\log n)$ ($n$ is the number of agents and $r$ is the rank of the payoff matrix) payoff entries are required to achieve sufficiently well strategy evaluation performance. Empirical results on evaluating the strategies in three synthetic games and twelve real world games demonstrate that strategy evaluation from a few entries can lead to comparable performance to algorithms with full knowledge of the payoff matrix.", "bibtex": "@InProceedings{pmlr-v139-du21e,\n title = \t {Estimating $\u03b1$-Rank from A Few Entries with Low Rank Matrix Completion},\n author = {Du, Yali and Yan, Xue and Chen, Xu and Wang, Jun and Zhang, Haifeng},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2870--2879},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/du21e/du21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/du21e.html},\n abstract = \t {Multi-agent evaluation aims at the assessment of an agent\u2019s strategy on the basis of interaction with others. Typically, existing methods such as $\\alpha$-rank and its approximation still require to exhaustively compare all pairs of joint strategies for an accurate ranking, which in practice is computationally expensive. In this paper, we aim to reduce the number of pairwise comparisons in recovering a satisfying ranking for $n$ strategies in two-player meta-games, by exploring the fact that agents with similar skills may achieve similar payoffs against others. Two situations are considered: the first one is when we can obtain the true payoffs; the other one is when we can only access noisy payoff. Based on these formulations, we leverage low-rank matrix completion and design two novel algorithms for noise-free and noisy evaluations respectively. For both of these settings, we theorize that $O(nr \\log n)$ ($n$ is the number of agents and $r$ is the rank of the payoff matrix) payoff entries are required to achieve sufficiently well strategy evaluation performance. 
Empirical results on evaluating the strategies in three synthetic games and twelve real world games demonstrate that strategy evaluation from a few entries can lead to comparable performance to algorithms with full knowledge of the payoff matrix.}\n}", "pdf": "http://proceedings.mlr.press/v139/du21e/du21e.pdf", "supp": "", "pdf_size": 2587291, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11530398268090474980&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "University College London, UK; Institute of Automation, Chinese Academy of Sciences; Beijing Key Laboratory of Big Data Management and Analysis Methods, GSAI, Renmin University of China; University College London, UK; Institute of Automation, Chinese Academy of Sciences", "aff_domain": "gmail.com; ; ;ia.ac.cn;ia.ac.cn", "email": "gmail.com; ; ;ia.ac.cn;ia.ac.cn", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/du21e.html", "aff_unique_index": "0;1;2;0;1", "aff_unique_norm": "University College London;Chinese Academy of Sciences;Renmin University of China", "aff_unique_dep": ";Institute of Automation;GSAI", "aff_unique_url": "https://www.ucl.ac.uk;http://www.ia.cas.cn;http://www.ruc.edu.cn", "aff_unique_abbr": "UCL;CAS;RUC", "aff_campus_unique_index": "1", "aff_campus_unique": ";Beijing", "aff_country_unique_index": "0;1;1;0;1", "aff_country_unique": "United Kingdom;China" }, { "title": "Estimating Identifiable Causal Effects on Markov Equivalence Class through Double Machine Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10727", "id": "10727", "proceeding": "http://proceedings.mlr.press/v139/jung21b.html", "slides": "/media/icml-2021/Slides/10727.pdf", "author_site": "Yonghan Jung, Jin Tian, Elias Bareinboim", "author": "Yonghan Jung; Jin Tian; Elias Bareinboim", "abstract": "General methods have been developed for estimating causal effects from observational data under causal assumptions encoded in the form of a causal graph. Most of this literature assumes that the underlying causal graph is completely specified. However, only observational data is available in most practical settings, which means that one can learn at most a Markov equivalence class (MEC) of the underlying causal graph. In this paper, we study the problem of causal estimation from a MEC represented by a partial ancestral graph (PAG), which is learnable from observational data. We develop a general estimator for any identifiable causal effects in a PAG. The result fills a gap for an end-to-end solution to causal inference from observational data to effects estimation. Specifically, we develop a complete identification algorithm that derives an influence function for any identifiable causal effects from PAGs. We then construct a double/debiased machine learning (DML) estimator that is robust to model misspecification and biases in nuisance function estimation, permitting the use of modern machine learning techniques. 
Simulation results corroborate with the theory.", "bibtex": "@InProceedings{pmlr-v139-jung21b,\n title = \t {Estimating Identifiable Causal Effects on Markov Equivalence Class through Double Machine Learning},\n author = {Jung, Yonghan and Tian, Jin and Bareinboim, Elias},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5168--5179},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jung21b/jung21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/jung21b.html},\n abstract = \t {General methods have been developed for estimating causal effects from observational data under causal assumptions encoded in the form of a causal graph. Most of this literature assumes that the underlying causal graph is completely specified. However, only observational data is available in most practical settings, which means that one can learn at most a Markov equivalence class (MEC) of the underlying causal graph. In this paper, we study the problem of causal estimation from a MEC represented by a partial ancestral graph (PAG), which is learnable from observational data. We develop a general estimator for any identifiable causal effects in a PAG. The result fills a gap for an end-to-end solution to causal inference from observational data to effects estimation. Specifically, we develop a complete identification algorithm that derives an influence function for any identifiable causal effects from PAGs. We then construct a double/debiased machine learning (DML) estimator that is robust to model misspecification and biases in nuisance function estimation, permitting the use of modern machine learning techniques. 
Simulation results corroborate with the theory.}\n}", "pdf": "http://proceedings.mlr.press/v139/jung21b/jung21b.pdf", "supp": "", "pdf_size": 1013217, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=368013510525571248&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science, Purdue University, USA; Department of Computer Science, Iowa State University, USA; Department of Computer Science, Columbia University, USA", "aff_domain": "purdue.edu; ; ", "email": "purdue.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/jung21b.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Purdue University;Iowa State University;Columbia University", "aff_unique_dep": "Department of Computer Science;Department of Computer Science;Department of Computer Science", "aff_unique_url": "https://www.purdue.edu;https://www.iastate.edu;https://www.columbia.edu", "aff_unique_abbr": "Purdue;ISU;Columbia", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Estimation and Quantization of Expected Persistence Diagrams", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9625", "id": "9625", "proceeding": "http://proceedings.mlr.press/v139/divol21a.html", "slides": "", "author_site": "Vincent Divol, Theo Lacombe", "author": "Vincent Divol; Theo Lacombe", "abstract": "Persistence diagrams (PDs) are the most common descriptors used to encode the topology of structured data appearing in challenging learning tasks;\u00a0think e.g.\u00a0of graphs, time series or point clouds sampled close to a manifold. Given random objects and the corresponding distribution of PDs, one may want to build a statistical summary\u2014such as a mean\u2014of these random PDs, which is however not a trivial task as the natural geometry of the space of PDs is not linear. In this article, we study two such summaries, the Expected Persistence Diagram (EPD), and its quantization. The EPD is a measure supported on $\\mathbb{R}^2$, which may be approximated by its empirical counterpart. We prove that this estimator is optimal from a minimax standpoint on a large class of models with a parametric rate of convergence. The empirical EPD is simple and efficient to compute, but possibly has a very large support, hindering its use in practice. To overcome this issue, we propose an algorithm to compute a quantization of the empirical EPD, a measure with small support which is shown to approximate with near-optimal rates a quantization of the theoretical EPD.", "bibtex": "@InProceedings{pmlr-v139-divol21a,\n title = \t {Estimation and Quantization of Expected Persistence Diagrams},\n author = {Divol, Vincent and Lacombe, Theo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2760--2770},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/divol21a/divol21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/divol21a.html},\n abstract = \t {Persistence diagrams (PDs) are the most common descriptors used to encode the topology of structured data appearing in challenging learning tasks;\u00a0think e.g.\u00a0of graphs, time series or point clouds sampled close to a manifold. 
Given random objects and the corresponding distribution of PDs, one may want to build a statistical summary\u2014such as a mean\u2014of these random PDs, which is however not a trivial task as the natural geometry of the space of PDs is not linear. In this article, we study two such summaries, the Expected Persistence Diagram (EPD), and its quantization. The EPD is a measure supported on $\\mathbb{R}^2$, which may be approximated by its empirical counterpart. We prove that this estimator is optimal from a minimax standpoint on a large class of models with a parametric rate of convergence. The empirical EPD is simple and efficient to compute, but possibly has a very large support, hindering its use in practice. To overcome this issue, we propose an algorithm to compute a quantization of the empirical EPD, a measure with small support which is shown to approximate with near-optimal rates a quantization of the theoretical EPD.}\n}", "pdf": "http://proceedings.mlr.press/v139/divol21a/divol21a.pdf", "supp": "", "pdf_size": 1116136, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10408108786586733858&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Universit\u00e9 Paris-Saclay, CNRS, Inria, Laboratoire de Math\u00e9matiques d\u2019Orsay, 91405, Orsay, France; Universit\u00e9 Paris-Saclay, CNRS, Inria, Laboratoire de Math\u00e9matiques d\u2019Orsay, 91405, Orsay, France", "aff_domain": "inria.fr;inria.fr", "email": "inria.fr;inria.fr", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/divol21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Universit\u00e9 Paris-Saclay", "aff_unique_dep": "Laboratoire de Math\u00e9matiques d\u2019Orsay", "aff_unique_url": "https://www.universite-paris-saclay.fr", "aff_unique_abbr": "UPS", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Orsay", "aff_country_unique_index": "0;0", "aff_country_unique": "France" }, { "title": "Evaluating Robustness of Predictive Uncertainty Estimation: Are Dirichlet-based Models Reliable?", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10405", "id": "10405", "proceeding": "http://proceedings.mlr.press/v139/kopetzki21a.html", "slides": "", "author_site": "Anna-Kathrin Kopetzki, Bertrand Charpentier, Daniel Z\u00fcgner, Sandhya Giri, Stephan G\u00fcnnemann", "author": "Anna-Kathrin Kopetzki; Bertrand Charpentier; Daniel Z\u00fcgner; Sandhya Giri; Stephan G\u00fcnnemann", "abstract": "Dirichlet-based uncertainty (DBU) models are a recent and promising class of uncertainty-aware models. DBU models predict the parameters of a Dirichlet distribution to provide fast, high-quality uncertainty estimates alongside with class predictions. In this work, we present the first large-scale, in-depth study of the robustness of DBU models under adversarial attacks. Our results suggest that uncertainty estimates of DBU models are not robust w.r.t. three important tasks: (1) indicating correctly and wrongly classified samples; (2) detecting adversarial examples; and (3) distinguishing between in-distribution (ID) and out-of-distribution (OOD) data. Additionally, we explore the first approaches to make DBU models more robust.
While adversarial training has a minor effect, our median smoothing based approach significantly increases robustness of DBU models.", "bibtex": "@InProceedings{pmlr-v139-kopetzki21a,\n title = \t {Evaluating Robustness of Predictive Uncertainty Estimation: Are Dirichlet-based Models Reliable?},\n author = {Kopetzki, Anna-Kathrin and Charpentier, Bertrand and Z{\\\"u}gner, Daniel and Giri, Sandhya and G{\\\"u}nnemann, Stephan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5707--5718},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kopetzki21a/kopetzki21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kopetzki21a.html},\n abstract = \t {Dirichlet-based uncertainty (DBU) models are a recent and promising class of uncertainty-aware models. DBU models predict the parameters of a Dirichlet distribution to provide fast, high-quality uncertainty estimates alongside with class predictions. In this work, we present the first large-scale, in-depth study of the robustness of DBU models under adversarial attacks. Our results suggest that uncertainty estimates of DBU models are not robust w.r.t. three important tasks: (1) indicating correctly and wrongly classified samples; (2) detecting adversarial examples; and (3) distinguishing between in-distribution (ID) and out-of-distribution (OOD) data. Additionally, we explore the first approaches to make DBU models more robust. While adversarial training has a minor effect, our median smoothing based approach significantly increases robustness of DBU models.}\n}", "pdf": "http://proceedings.mlr.press/v139/kopetzki21a/kopetzki21a.pdf", "supp": "", "pdf_size": 727452, "gs_citation": 66, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5773054947592188875&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 5, "aff": ";;;;", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/kopetzki21a.html" }, { "title": "Evaluating the Implicit Midpoint Integrator for Riemannian Hamiltonian Monte Carlo", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10427", "id": "10427", "proceeding": "http://proceedings.mlr.press/v139/brofos21a.html", "slides": "", "author_site": "James Brofos, Roy Lederman", "author": "James Brofos; Roy R Lederman", "abstract": "Riemannian manifold Hamiltonian Monte Carlo is traditionally carried out using the generalized leapfrog integrator. However, this integrator is not the only choice and other integrators yielding valid Markov chain transition operators may be considered. In this work, we examine the implicit midpoint integrator as an alternative to the generalized leapfrog integrator. We discuss advantages and disadvantages of the implicit midpoint integrator for Hamiltonian Monte Carlo, its theoretical properties, and an empirical assessment of the critical attributes of such an integrator for Hamiltonian Monte Carlo: energy conservation, volume preservation, and reversibility.
Empirically, we find that while leapfrog iterations are faster, the implicit midpoint integrator has better energy conservation, leading to higher acceptance rates, as well as better conservation of volume and better reversibility, arguably yielding a more accurate sampling procedure.", "bibtex": "@InProceedings{pmlr-v139-brofos21a,\n title = \t {Evaluating the Implicit Midpoint Integrator for Riemannian Hamiltonian Monte Carlo},\n author = {Brofos, James and Lederman, Roy R},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1072--1081},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/brofos21a/brofos21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/brofos21a.html},\n abstract = \t {Riemannian manifold Hamiltonian Monte Carlo is traditionally carried out using the generalized leapfrog integrator. However, this integrator is not the only choice and other integrators yielding valid Markov chain transition operators may be considered. In this work, we examine the implicit midpoint integrator as an alternative to the generalized leapfrog integrator. We discuss advantages and disadvantages of the implicit midpoint integrator for Hamiltonian Monte Carlo, its theoretical properties, and an empirical assessment of the critical attributes of such an integrator for Hamiltonian Monte Carlo: energy conservation, volume preservation, and reversibility. Empirically, we find that while leapfrog iterations are faster, the implicit midpoint integrator has better energy conservation, leading to higher acceptance rates, as well as better conservation of volume and better reversibility, arguably yielding a more accurate sampling procedure.}\n}", "pdf": "http://proceedings.mlr.press/v139/brofos21a/brofos21a.pdf", "supp": "", "pdf_size": 624930, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8133615441384281097&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Statistics and Data Science, Yale University; Department of Statistics and Data Science, Yale University", "aff_domain": "yale.edu; ", "email": "yale.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/brofos21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Yale University", "aff_unique_dep": "Department of Statistics and Data Science", "aff_unique_url": "https://www.yale.edu", "aff_unique_abbr": "Yale", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Event Outlier Detection in Continuous Time", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8871", "id": "8871", "proceeding": "http://proceedings.mlr.press/v139/liu21g.html", "slides": "/media/icml-2021/Slides/8871.pdf", "author_site": "Siqi Liu, Milos Hauskrecht", "author": "Siqi Liu; Milos Hauskrecht", "abstract": "Continuous-time event sequences represent discrete events occurring in continuous time. Such sequences arise frequently in real-life. Usually we expect the sequences to follow some regular pattern over time. However, sometimes these patterns may be interrupted by unexpected absence or occurrences of events. 
Identification of these unexpected cases can be very important as they may point to abnormal situations that need human attention. In this work, we study and develop methods for detecting outliers in continuous-time event sequences, including unexpected absence and unexpected occurrences of events. Since the patterns that event sequences tend to follow may change in different contexts, we develop outlier detection methods based on point processes that can take context information into account. Our methods are based on Bayesian decision theory and hypothesis testing with theoretical guarantees. To test the performance of the methods, we conduct experiments on both synthetic data and real-world clinical data and show the effectiveness of the proposed methods.", "bibtex": "@InProceedings{pmlr-v139-liu21g,\n title = \t {Event Outlier Detection in Continuous Time},\n author = {Liu, Siqi and Hauskrecht, Milos},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6793--6803},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21g/liu21g.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21g.html},\n abstract = \t {Continuous-time event sequences represent discrete events occurring in continuous time. Such sequences arise frequently in real-life. Usually we expect the sequences to follow some regular pattern over time. However, sometimes these patterns may be interrupted by unexpected absence or occurrences of events. Identification of these unexpected cases can be very important as they may point to abnormal situations that need human attention. In this work, we study and develop methods for detecting outliers in continuous-time event sequences, including unexpected absence and unexpected occurrences of events. Since the patterns that event sequences tend to follow may change in different contexts, we develop outlier detection methods based on point processes that can take context information into account. Our methods are based on Bayesian decision theory and hypothesis testing with theoretical guarantees. 
To test the performance of the methods, we conduct experiments on both synthetic data and real-world clinical data and show the effectiveness of the proposed methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21g/liu21g.pdf", "supp": "", "pdf_size": 1036764, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11315185602040849494&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Department of Computer Science, University of Pittsburgh, Pittsburgh, PA, USA+Borealis AI, Vancouver, BC, Canada; Department of Computer Science, University of Pittsburgh, Pittsburgh, PA, USA", "aff_domain": "cs.pitt.edu;pitt.edu", "email": "cs.pitt.edu;pitt.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/liu21g.html", "aff_unique_index": "0+1;0", "aff_unique_norm": "University of Pittsburgh;Borealis AI", "aff_unique_dep": "Department of Computer Science;", "aff_unique_url": "https://www.pitt.edu;https://www.borealisai.com", "aff_unique_abbr": "Pitt;", "aff_campus_unique_index": "0+1;0", "aff_campus_unique": "Pittsburgh;Vancouver", "aff_country_unique_index": "0+1;0", "aff_country_unique": "United States;Canada" }, { "title": "Evolving Attention with Residual Convolutions", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8881", "id": "8881", "proceeding": "http://proceedings.mlr.press/v139/wang21ab.html", "slides": "", "author_site": "Yujing Wang, Yaming Yang, Jiangang Bai, Mingliang Zhang, Jing Bai, JING YU, Ce Zhang, Gao Huang, Yunhai Tong", "author": "Yujing Wang; Yaming Yang; Jiangang Bai; Mingliang Zhang; Jing Bai; Jing Yu; Ce Zhang; Gao Huang; Yunhai Tong", "abstract": "Transformer is a ubiquitous model for natural language processing and has attracted wide attentions in computer vision. The attention maps are indispensable for a transformer model to encode the dependencies among input tokens. However, they are learned independently in each layer and sometimes fail to capture precise patterns. In this paper, we propose a novel and generic mechanism based on evolving attention to improve the performance of transformers. On one hand, the attention maps in different layers share common knowledge, thus the ones in preceding layers can instruct the attention in succeeding layers through residual connections. On the other hand, low-level and high-level attentions vary in the level of abstraction, so we adopt convolutional layers to model the evolutionary process of attention maps. 
The proposed evolving attention mechanism achieves significant performance improvement over various state-of-the-art models for multiple tasks, including image classification, natural language understanding and machine translation.", "bibtex": "@InProceedings{pmlr-v139-wang21ab,\n title = \t {Evolving Attention with Residual Convolutions},\n author = {Wang, Yujing and Yang, Yaming and Bai, Jiangang and Zhang, Mingliang and Bai, Jing and Yu, Jing and Zhang, Ce and Huang, Gao and Tong, Yunhai},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10971--10980},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21ab/wang21ab.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21ab.html},\n abstract = \t {Transformer is a ubiquitous model for natural language processing and has attracted wide attentions in computer vision. The attention maps are indispensable for a transformer model to encode the dependencies among input tokens. However, they are learned independently in each layer and sometimes fail to capture precise patterns. In this paper, we propose a novel and generic mechanism based on evolving attention to improve the performance of transformers. On one hand, the attention maps in different layers share common knowledge, thus the ones in preceding layers can instruct the attention in succeeding layers through residual connections. On the other hand, low-level and high-level attentions vary in the level of abstraction, so we adopt convolutional layers to model the evolutionary process of attention maps. 
The proposed evolving attention mechanism achieves significant performance improvement over various state-of-the-art models for multiple tasks, including image classification, natural language understanding and machine translation.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21ab/wang21ab.pdf", "supp": "", "pdf_size": 1688478, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10840753293659753469&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Peking University; Microsoft Research; Peking University+Microsoft Research; Peking University+Microsoft Research; Microsoft Research; Institute of Information Engineering, Chinese Academy of Sciences; ETH Zurich; Tsinghua University; Peking University", "aff_domain": "pku.edu.cn;microsoft.com; ; ; ; ; ; ;pku.edu.cn", "email": "pku.edu.cn;microsoft.com; ; ; ; ; ; ;pku.edu.cn", "github": "", "project": "", "author_num": 9, "oa": "https://proceedings.mlr.press/v139/wang21ab.html", "aff_unique_index": "0;1;0+1;0+1;1;2;3;4;0", "aff_unique_norm": "Peking University;Microsoft;Chinese Academy of Sciences;ETH Zurich;Tsinghua University", "aff_unique_dep": ";Microsoft Research;Institute of Information Engineering;;", "aff_unique_url": "http://www.pku.edu.cn;https://www.microsoft.com/en-us/research;http://www.cas.cn;https://www.ethz.ch;https://www.tsinghua.edu.cn", "aff_unique_abbr": "Peking U;MSR;CAS;ETHZ;THU", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0+1;0+1;1;0;2;0;0", "aff_country_unique": "China;United States;Switzerland" }, { "title": "Exact Gap between Generalization Error and Uniform Convergence in Random Feature Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10045", "id": "10045", "proceeding": "http://proceedings.mlr.press/v139/yang21a.html", "slides": "", "author_site": "Zitong Yang, Yu Bai, Song Mei", "author": "Zitong Yang; Yu Bai; Song Mei", "abstract": "Recent work showed that there could be a large gap between the classical uniform convergence bound and the actual test error of zero-training-error predictors (interpolators) such as deep neural networks. To better understand this gap, we study the uniform convergence in the nonlinear random feature model and perform a precise theoretical analysis on how uniform convergence depends on the sample size and the number of parameters. We derive and prove analytical expressions for three quantities in this model: 1) classical uniform convergence over norm balls, 2) uniform convergence over interpolators in the norm ball (recently proposed by\u00a0\\citet{zhou2021uniform}), and 3) the risk of minimum norm interpolator. We show that, in the setting where the classical uniform convergence bound is vacuous (diverges to $\\infty$), uniform convergence over the interpolators still gives a non-trivial bound of the test error of interpolating solutions. We also showcase a different setting where classical uniform convergence bound is non-vacuous, but uniform convergence over interpolators can give an improved sample complexity guarantee. 
Our result provides a first exact comparison between the test errors and uniform convergence bounds for interpolators beyond simple linear models.", "bibtex": "@InProceedings{pmlr-v139-yang21a,\n title = \t {Exact Gap between Generalization Error and Uniform Convergence in Random Feature Models},\n author = {Yang, Zitong and Bai, Yu and Mei, Song},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11704--11715},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yang21a/yang21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yang21a.html},\n abstract = \t {Recent work showed that there could be a large gap between the classical uniform convergence bound and the actual test error of zero-training-error predictors (interpolators) such as deep neural networks. To better understand this gap, we study the uniform convergence in the nonlinear random feature model and perform a precise theoretical analysis on how uniform convergence depends on the sample size and the number of parameters. We derive and prove analytical expressions for three quantities in this model: 1) classical uniform convergence over norm balls, 2) uniform convergence over interpolators in the norm ball (recently proposed by\u00a0\\citet{zhou2021uniform}), and 3) the risk of minimum norm interpolator. We show that, in the setting where the classical uniform convergence bound is vacuous (diverges to $\\infty$), uniform convergence over the interpolators still gives a non-trivial bound of the test error of interpolating solutions. We also showcase a different setting where classical uniform convergence bound is non-vacuous, but uniform convergence over interpolators can give an improved sample complexity guarantee. 
Our result provides a first exact comparison between the test errors and uniform convergence bounds for interpolators beyond simple linear models.}\n}", "pdf": "http://proceedings.mlr.press/v139/yang21a/yang21a.pdf", "supp": "", "pdf_size": 627352, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8929517168961587214&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Department of Electrical Engineering and Computer Sciences, University of California, Berkeley; Salesforce Research; Department of Statistics, University of California, Berkeley", "aff_domain": "berkeley.edu;salesforce.com;berkeley.edu", "email": "berkeley.edu;salesforce.com;berkeley.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/yang21a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of California, Berkeley;Salesforce", "aff_unique_dep": "Department of Electrical Engineering and Computer Sciences;Salesforce Research", "aff_unique_url": "https://www.berkeley.edu;https://research.salesforce.com", "aff_unique_abbr": "UC Berkeley;Salesforce", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Exact Optimization of Conformal Predictors via Incremental and Decremental Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9681", "id": "9681", "proceeding": "http://proceedings.mlr.press/v139/cherubin21a.html", "slides": "", "author_site": "Giovanni Cherubin, Konstantinos Chatzikokolakis, Martin Jaggi", "author": "Giovanni Cherubin; Konstantinos Chatzikokolakis; Martin Jaggi", "abstract": "Conformal Predictors (CP) are wrappers around ML models, providing error guarantees under weak assumptions on the data distribution. They are suitable for a wide range of problems, from classification and regression to anomaly detection. Unfortunately, their very high computational complexity limits their applicability to large datasets. In this work, we show that it is possible to speed up a CP classifier considerably, by studying it in conjunction with the underlying ML method, and by exploiting incremental&decremental learning. For methods such as k-NN, KDE, and kernel LS-SVM, our approach reduces the running time by one order of magnitude, whilst producing exact solutions. With similar ideas, we also achieve a linear speed up for the harder case of bootstrapping. Finally, we extend these techniques to improve upon an optimization of k-NN CP for regression. We evaluate our findings empirically, and discuss when methods are suitable for CP optimization.", "bibtex": "@InProceedings{pmlr-v139-cherubin21a,\n title = \t {Exact Optimization of Conformal Predictors via Incremental and Decremental Learning},\n author = {Cherubin, Giovanni and Chatzikokolakis, Konstantinos and Jaggi, Martin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1836--1845},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cherubin21a/cherubin21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/cherubin21a.html},\n abstract = \t {Conformal Predictors (CP) are wrappers around ML models, providing error guarantees under weak assumptions on the data distribution. 
They are suitable for a wide range of problems, from classification and regression to anomaly detection. Unfortunately, their very high computational complexity limits their applicability to large datasets. In this work, we show that it is possible to speed up a CP classifier considerably, by studying it in conjunction with the underlying ML method, and by exploiting incremental&decremental learning. For methods such as k-NN, KDE, and kernel LS-SVM, our approach reduces the running time by one order of magnitude, whilst producing exact solutions. With similar ideas, we also achieve a linear speed up for the harder case of bootstrapping. Finally, we extend these techniques to improve upon an optimization of k-NN CP for regression. We evaluate our findings empirically, and discuss when methods are suitable for CP optimization.}\n}", "pdf": "http://proceedings.mlr.press/v139/cherubin21a/cherubin21a.pdf", "supp": "", "pdf_size": 461147, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9789883793705911412&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Alan Turing Institute, London, UK; University of Athens; EPFL", "aff_domain": "turing.ac.uk; ; ", "email": "turing.ac.uk; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/cherubin21a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Alan Turing Institute;University of Athens;EPFL", "aff_unique_dep": ";;", "aff_unique_url": "https://www.turing.ac.uk;https://www.uoa.gr;https://www.epfl.ch", "aff_unique_abbr": "ATI;UoA;EPFL", "aff_campus_unique_index": "0", "aff_campus_unique": "London;", "aff_country_unique_index": "0;1;2", "aff_country_unique": "United Kingdom;Greece;Switzerland" }, { "title": "Examining and Combating Spurious Features under Distribution Shift", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9131", "id": "9131", "proceeding": "http://proceedings.mlr.press/v139/zhou21g.html", "slides": "", "author_site": "Chunting Zhou, Xuezhe Ma, Paul Michel, Graham Neubig", "author": "Chunting Zhou; Xuezhe Ma; Paul Michel; Graham Neubig", "abstract": "A central goal of machine learning is to learn robust representations that capture the fundamental relationship between inputs and output labels. However, minimizing training errors over finite or biased datasets results in models latching on to spurious correlations between the training input/output pairs that are not fundamental to the problem at hand. In this paper, we define and analyze robust and spurious representations using the information-theoretic concept of minimal sufficient statistics. We prove that even when there is only bias of the input distribution (i.e. covariate shift), models can still pick up spurious features from their training data. Group distributionally robust optimization (DRO) provides an effective tool to alleviate covariate shift by minimizing the worst-case training losses over a set of pre-defined groups. Inspired by our analysis, we demonstrate that group DRO can fail when groups do not directly account for various spurious correlations that occur in the data. To address this, we further propose to minimize the worst-case losses over a more flexible set of distributions that are defined on the joint distribution of groups and instances, instead of treating each group as a whole at optimization time. 
Through extensive experiments on one image and two language tasks, we show that our model is significantly more robust than comparable baselines under various partitions.", "bibtex": "@InProceedings{pmlr-v139-zhou21g,\n title = \t {Examining and Combating Spurious Features under Distribution Shift},\n author = {Zhou, Chunting and Ma, Xuezhe and Michel, Paul and Neubig, Graham},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12857--12867},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhou21g/zhou21g.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhou21g.html},\n abstract = \t {A central goal of machine learning is to learn robust representations that capture the fundamental relationship between inputs and output labels. However, minimizing training errors over finite or biased datasets results in models latching on to spurious correlations between the training input/output pairs that are not fundamental to the problem at hand. In this paper, we define and analyze robust and spurious representations using the information-theoretic concept of minimal sufficient statistics. We prove that even when there is only bias of the input distribution (i.e. covariate shift), models can still pick up spurious features from their training data. Group distributionally robust optimization (DRO) provides an effective tool to alleviate covariate shift by minimizing the worst-case training losses over a set of pre-defined groups. Inspired by our analysis, we demonstrate that group DRO can fail when groups do not directly account for various spurious correlations that occur in the data. To address this, we further propose to minimize the worst-case losses over a more flexible set of distributions that are defined on the joint distribution of groups and instances, instead of treating each group as a whole at optimization time. 
Through extensive experiments on one image and two language tasks, we show that our model is significantly more robust than comparable baselines under various partitions.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhou21g/zhou21g.pdf", "supp": "", "pdf_size": 1138834, "gs_citation": 79, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14520135804314510635&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Language Technologies Institute, Carnegie Mellon University, Pittsburgh, USA+1; Information Sciences Institute, University of Southern California, Los Angeles, USA+2; Language Technologies Institute, Carnegie Mellon University, Pittsburgh, USA+1; Language Technologies Institute, Carnegie Mellon University, Pittsburgh, USA+1", "aff_domain": "cs.cmu.edu; ; ; ", "email": "cs.cmu.edu; ; ; ", "github": "https://github.com/violet-zct/group-conditional-DRO", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/zhou21g.html", "aff_unique_index": "0;2;0;0", "aff_unique_norm": "Carnegie Mellon University;;University of Southern California", "aff_unique_dep": "Language Technologies Institute;;Information Sciences Institute", "aff_unique_url": "https://www.cmu.edu;;https://www.usc.edu", "aff_unique_abbr": "CMU;;USC", "aff_campus_unique_index": "0;2;0;0", "aff_campus_unique": "Pittsburgh;;Los Angeles", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States;" }, { "title": "Explainable Automated Graph Representation Learning with Hyperparameter Importance", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9679", "id": "9679", "proceeding": "http://proceedings.mlr.press/v139/wang21f.html", "slides": "", "author_site": "Xin Wang, Shuyi Fan, Kun Kuang, Wenwu Zhu", "author": "Xin Wang; Shuyi Fan; Kun Kuang; Wenwu Zhu", "abstract": "Current graph representation (GR) algorithms require huge demand of human experts in hyperparameter tuning, which significantly limits their practical applications, leading to an urge for automated graph representation without human intervention. Although automated machine learning (AutoML) serves as a good candidate for automatic hyperparameter tuning, little literature has been reported on automated graph presentation learning and the only existing work employs a black-box strategy, lacking insights into explaining the relative importance of different hyperparameters. To address this issue, we study explainable automated graph representation with hyperparameter importance in this paper. We propose an explainable AutoML approach for graph representation (e-AutoGR) which utilizes explainable graph features during performance estimation and learns decorrelated importance weights for different hyperparameters in affecting the model performance through a non-linear decorrelated weighting regression. These learned importance weights can in turn help to provide more insights in hyperparameter search procedure. We theoretically prove the soundness of the decorrelated weighting algorithm. 
Extensive experiments on real-world datasets demonstrate the superiority of our proposed e-AutoGR model against state-of-the-art methods in terms of both model performance and hyperparameter importance explainability.", "bibtex": "@InProceedings{pmlr-v139-wang21f,\n title = \t {Explainable Automated Graph Representation Learning with Hyperparameter Importance},\n author = {Wang, Xin and Fan, Shuyi and Kuang, Kun and Zhu, Wenwu},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10727--10737},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21f/wang21f.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21f.html},\n abstract = \t {Current graph representation (GR) algorithms require huge demand of human experts in hyperparameter tuning, which significantly limits their practical applications, leading to an urge for automated graph representation without human intervention. Although automated machine learning (AutoML) serves as a good candidate for automatic hyperparameter tuning, little literature has been reported on automated graph presentation learning and the only existing work employs a black-box strategy, lacking insights into explaining the relative importance of different hyperparameters. To address this issue, we study explainable automated graph representation with hyperparameter importance in this paper. We propose an explainable AutoML approach for graph representation (e-AutoGR) which utilizes explainable graph features during performance estimation and learns decorrelated importance weights for different hyperparameters in affecting the model performance through a non-linear decorrelated weighting regression. These learned importance weights can in turn help to provide more insights in hyperparameter search procedure. We theoretically prove the soundness of the decorrelated weighting algorithm. 
Extensive experiments on real-world datasets demonstrate the superiority of our proposed e-AutoGR model against state-of-the-art methods in terms of both model performance and hyperparameter importance explainability.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21f/wang21f.pdf", "supp": "", "pdf_size": 5655515, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=120742486930848476&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science and Technology, Tsinghua University, Beijing, China; Department of Computer Science and Technology, Tsinghua University, Beijing, China; College of Computer Science and Technology, Zhejiang University, Hangzhou, China; Department of Computer Science and Technology, Tsinghua University, Beijing, China", "aff_domain": "tsinghua.edu.cn; ; ;tsinghua.edu.cn", "email": "tsinghua.edu.cn; ; ;tsinghua.edu.cn", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/wang21f.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "Tsinghua University;Zhejiang University", "aff_unique_dep": "Department of Computer Science and Technology;College of Computer Science and Technology", "aff_unique_url": "https://www.tsinghua.edu.cn;http://www.zju.edu.cn", "aff_unique_abbr": "THU;ZJU", "aff_campus_unique_index": "0;0;1;0", "aff_campus_unique": "Beijing;Hangzhou", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "China" }, { "title": "Explaining Time Series Predictions with Dynamic Masks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8815", "id": "8815", "proceeding": "http://proceedings.mlr.press/v139/crabbe21a.html", "slides": "", "author_site": "Jonathan Crabb\u00e9, Mihaela van der Schaar", "author": "Jonathan Crabb\u00e9; Mihaela Van Der Schaar", "abstract": "How can we explain the predictions of a machine learning model? When the data is structured as a multivariate time series, this question induces additional difficulties such as the necessity for the explanation to embody the time dependency and the large number of inputs. To address these challenges, we propose dynamic masks (Dynamask). This method produces instance-wise importance scores for each feature at each time step by fitting a perturbation mask to the input sequence. In order to incorporate the time dependency of the data, Dynamask studies the effects of dynamic perturbation operators. In order to tackle the large number of inputs, we propose a scheme to make the feature selection parsimonious (to select no more feature than necessary) and legible (a notion that we detail by making a parallel with information theory). With synthetic and real-world data, we demonstrate that the dynamic underpinning of Dynamask, together with its parsimony, offer a neat improvement in the identification of feature importance over time. 
The modularity of Dynamask makes it ideal as a plug-in to increase the transparency of a wide range of machine learning models in areas such as medicine and finance, where time series are abundant.", "bibtex": "@InProceedings{pmlr-v139-crabbe21a,\n title = \t {Explaining Time Series Predictions with Dynamic Masks},\n author = {Crabb{\\'e}, Jonathan and Van Der Schaar, Mihaela},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2166--2177},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/crabbe21a/crabbe21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/crabbe21a.html},\n abstract = \t {How can we explain the predictions of a machine learning model? When the data is structured as a multivariate time series, this question induces additional difficulties such as the necessity for the explanation to embody the time dependency and the large number of inputs. To address these challenges, we propose dynamic masks (Dynamask). This method produces instance-wise importance scores for each feature at each time step by fitting a perturbation mask to the input sequence. In order to incorporate the time dependency of the data, Dynamask studies the effects of dynamic perturbation operators. In order to tackle the large number of inputs, we propose a scheme to make the feature selection parsimonious (to select no more feature than necessary) and legible (a notion that we detail by making a parallel with information theory). With synthetic and real-world data, we demonstrate that the dynamic underpinning of Dynamask, together with its parsimony, offer a neat improvement in the identification of feature importance over time. 
The modularity of Dynamask makes it ideal as a plug-in to increase the transparency of a wide range of machine learning models in areas such as medicine and finance, where time series are abundant.}\n}", "pdf": "http://proceedings.mlr.press/v139/crabbe21a/crabbe21a.pdf", "supp": "", "pdf_size": 1616673, "gs_citation": 116, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3877310140943578440&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "DAMTP, University of Cambridge, UK+University of California Los Angeles, USA+The Alan Turing Institute, UK; DAMTP, University of Cambridge, UK+University of California Los Angeles, USA+The Alan Turing Institute, UK", "aff_domain": "cam.ac.uk;cam.ac.uk", "email": "cam.ac.uk;cam.ac.uk", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/crabbe21a.html", "aff_unique_index": "0+1+2;0+1+2", "aff_unique_norm": "University of Cambridge;University of California, Los Angeles;Alan Turing Institute", "aff_unique_dep": "Department of Applied Mathematics and Theoretical Physics;;", "aff_unique_url": "https://www.cam.ac.uk;https://www.ucla.edu;https://www.turing.ac.uk", "aff_unique_abbr": "Cambridge;UCLA;ATI", "aff_campus_unique_index": "0+1;0+1", "aff_campus_unique": "Cambridge;Los Angeles;", "aff_country_unique_index": "0+1+0;0+1+0", "aff_country_unique": "United Kingdom;United States" }, { "title": "Explanations for Monotonic Classifiers.", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9955", "id": "9955", "proceeding": "http://proceedings.mlr.press/v139/marques-silva21a.html", "slides": "/media/icml-2021/Slides/9955.pdf", "author_site": "Joao Marques-Silva, Thomas Gerspacher, Martin Cooper, Alexey Ignatiev, Nina Narodytska", "author": "Joao Marques-Silva; Thomas Gerspacher; Martin C Cooper; Alexey Ignatiev; Nina Narodytska", "abstract": "In many classification tasks there is a requirement of monotonicity. Concretely, if all else remains constant, increasing (resp.\u00a0decreasing) the value of one or more features must not decrease (resp.\u00a0increase) the value of the prediction. Despite comprehensive efforts on learning monotonic classifiers, dedicated approaches for explaining monotonic classifiers are scarce and classifier-specific. This paper describes novel algorithms for the computation of one formal explanation of a (black-box) monotonic classifier. These novel algorithms are polynomial (indeed linear) in the run time complexity of the classifier. Furthermore, the paper presents a practically efficient model-agnostic algorithm for enumerating formal explanations.", "bibtex": "@InProceedings{pmlr-v139-marques-silva21a,\n title = \t {Explanations for Monotonic Classifiers.},\n author = {Marques-Silva, Joao and Gerspacher, Thomas and Cooper, Martin C and Ignatiev, Alexey and Narodytska, Nina},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7469--7479},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/marques-silva21a/marques-silva21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/marques-silva21a.html},\n abstract = \t {In many classification tasks there is a requirement of monotonicity. 
Concretely, if all else remains constant, increasing (resp.\u00a0decreasing) the value of one or more features must not decrease (resp.\u00a0increase) the value of the prediction. Despite comprehensive efforts on learning monotonic classifiers, dedicated approaches for explaining monotonic classifiers are scarce and classifier-specific. This paper describes novel algorithms for the computation of one formal explanation of a (black-box) monotonic classifier. These novel algorithms are polynomial (indeed linear) in the run time complexity of the classifier. Furthermore, the paper presents a practically efficient model-agnostic algorithm for enumerating formal explanations.}\n}", "pdf": "http://proceedings.mlr.press/v139/marques-silva21a/marques-silva21a.pdf", "supp": "", "pdf_size": 315391, "gs_citation": 73, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16925635536337167437&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 10, "aff": "IRIT, CNRS, Universit\u00e9 Paul Sabatier, Toulouse, France; IRIT, CNRS, Universit\u00e9 Paul Sabatier, Toulouse, France; IRIT, CNRS, Universit\u00e9 Paul Sabatier, Toulouse, France; Monash University, Melbourne, Australia; VMware Research, CA, USA", "aff_domain": "irit.fr; ; ; ; ", "email": "irit.fr; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/marques-silva21a.html", "aff_unique_index": "0;0;0;1;2", "aff_unique_norm": "Universit\u00e9 Paul Sabatier;Monash University;VMware Research", "aff_unique_dep": "IRIT;;", "aff_unique_url": "https://www.univ-tlse3.fr;https://www.monash.edu;https://research.vmware.com", "aff_unique_abbr": "UPS;Monash;VMware", "aff_campus_unique_index": "0;0;0;1", "aff_campus_unique": "Toulouse;Melbourne;", "aff_country_unique_index": "0;0;0;1;2", "aff_country_unique": "France;Australia;United States" }, { "title": "Exploiting Shared Representations for Personalized Federated Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10309", "id": "10309", "proceeding": "http://proceedings.mlr.press/v139/collins21a.html", "slides": "", "author_site": "Liam Collins, Hamed Hassani, Aryan Mokhtari, Sanjay Shakkottai", "author": "Liam Collins; Hamed Hassani; Aryan Mokhtari; Sanjay Shakkottai", "abstract": "Deep neural networks have shown the ability to extract universal feature representations from data such as images and text that have been useful for a variety of learning tasks. However, the fruits of representation learning have yet to be fully-realized in federated settings. Although data in federated settings is often non-i.i.d. across clients, the success of centralized deep learning suggests that data often shares a global {\\em feature representation}, while the statistical heterogeneity across clients or tasks is concentrated in the {\\em labels}. Based on this intuition, we propose a novel federated learning framework and algorithm for learning a shared data representation across clients and unique local heads for each client. Our algorithm harnesses the distributed computational power across clients to perform many local-updates with respect to the low-dimensional local parameters for every update of the representation. We prove that this method obtains linear convergence to the ground-truth representation with near-optimal sample complexity in a linear setting, demonstrating that it can efficiently reduce the problem dimension for each client. 
Further, we provide extensive experimental results demonstrating the improvement of our method over alternative personalized federated learning approaches in heterogeneous settings.", "bibtex": "@InProceedings{pmlr-v139-collins21a,\n title = \t {Exploiting Shared Representations for Personalized Federated Learning},\n author = {Collins, Liam and Hassani, Hamed and Mokhtari, Aryan and Shakkottai, Sanjay},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2089--2099},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/collins21a/collins21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/collins21a.html},\n abstract = \t {Deep neural networks have shown the ability to extract universal feature representations from data such as images and text that have been useful for a variety of learning tasks. However, the fruits of representation learning have yet to be fully-realized in federated settings. Although data in federated settings is often non-i.i.d. across clients, the success of centralized deep learning suggests that data often shares a global {\\em feature representation}, while the statistical heterogeneity across clients or tasks is concentrated in the {\\em labels}. Based on this intuition, we propose a novel federated learning framework and algorithm for learning a shared data representation across clients and unique local heads for each client. Our algorithm harnesses the distributed computational power across clients to perform many local-updates with respect to the low-dimensional local parameters for every update of the representation. We prove that this method obtains linear convergence to the ground-truth representation with near-optimal sample complexity in a linear setting, demonstrating that it can efficiently reduce the problem dimension for each client. 
Further, we provide extensive experimental results demonstrating the improvement of our method over alternative personalized federated learning approaches in heterogeneous settings.}\n}", "pdf": "http://proceedings.mlr.press/v139/collins21a/collins21a.pdf", "supp": "", "pdf_size": 3784501, "gs_citation": 966, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15594469304978697146&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "University of Texas at Austin; University of Pennsylvania; University of Texas at Austin; University of Texas at Austin", "aff_domain": "utexas.edu; ; ; ", "email": "utexas.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/collins21a.html", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "University of Texas at Austin;University of Pennsylvania", "aff_unique_dep": ";", "aff_unique_url": "https://www.utexas.edu;https://www.upenn.edu", "aff_unique_abbr": "UT Austin;UPenn", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Austin;", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Exploiting structured data for learning contagious diseases under incomplete testing", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8807", "id": "8807", "proceeding": "http://proceedings.mlr.press/v139/makar21a.html", "slides": "", "author_site": "Maggie Makar, Lauren R West, David C Hooper, Eric Horvitz, Erica Shenoy, John Guttag", "author": "Maggie Makar; Lauren West; David Hooper; Eric Horvitz; Erica Shenoy; John Guttag", "abstract": "One of the ways that machine learning algorithms can help control the spread of an infectious disease is by building models that predict who is likely to become infected making them good candidates for preemptive interventions. In this work we ask: can we build reliable infection prediction models when the observed data is collected under limited, and biased testing that prioritizes testing symptomatic individuals? Our analysis suggests that when the infection is highly transmissible, incomplete testing might be sufficient to achieve good out-of-sample prediction error. Guided by this insight, we develop an algorithm that predicts infections, and show that it outperforms baselines on simulated data. We apply our model to data from a large hospital to predict Clostridioides difficile infections; a communicable disease that is characterized by both symptomatically infected and asymptomatic (i.e., untested) carriers. 
Using a proxy instead of the unobserved untested-infected state, we show that our model outperforms benchmarks in predicting infections.", "bibtex": "@InProceedings{pmlr-v139-makar21a,\n title = \t {Exploiting structured data for learning contagious diseases under incomplete testing},\n author = {Makar, Maggie and West, Lauren and Hooper, David and Horvitz, Eric and Shenoy, Erica and Guttag, John},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7348--7357},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/makar21a/makar21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/makar21a.html},\n abstract = \t {One of the ways that machine learning algorithms can help control the spread of an infectious disease is by building models that predict who is likely to become infected making them good candidates for preemptive interventions. In this work we ask: can we build reliable infection prediction models when the observed data is collected under limited, and biased testing that prioritizes testing symptomatic individuals? Our analysis suggests that when the infection is highly transmissible, incomplete testing might be sufficient to achieve good out-of-sample prediction error. Guided by this insight, we develop an algorithm that predicts infections, and show that it outperforms baselines on simulated data. We apply our model to data from a large hospital to predict Clostridioides difficile infections; a communicable disease that is characterized by both symptomatically infected and asymptomatic (i.e., untested) carriers. 
Using a proxy instead of the unobserved untested-infected state, we show that our model outperforms benchmarks in predicting infections.}\n}", "pdf": "http://proceedings.mlr.press/v139/makar21a/makar21a.pdf", "supp": "", "pdf_size": 457525, "gs_citation": 0, "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:TNVgWgtW4Q8J:scholar.google.com/&scioq=Exploiting+structured+data+for+learning+contagious+diseases+under+incomplete+testing&hl=en&as_sdt=0,33", "gs_version_total": 5, "aff": "CSAIL, MIT; Infection Control Unit, Massachusetts General Hospital; Infection Control Unit, Massachusetts General Hospital; Microsoft; Infection Control Unit, Massachusetts General Hospital; CSAIL, MIT", "aff_domain": "mit.edu; ; ; ; ; ", "email": "mit.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/makar21a.html", "aff_unique_index": "0;1;1;2;1;0", "aff_unique_norm": "Massachusetts Institute of Technology;Massachusetts General Hospital;Microsoft", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory;Infection Control Unit;Microsoft Corporation", "aff_unique_url": "https://www.csail.mit.edu;https://www.massgeneral.org;https://www.microsoft.com", "aff_unique_abbr": "MIT;;Microsoft", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cambridge;", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Exploration in Approximate Hyper-State Space for Meta Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10379", "id": "10379", "proceeding": "http://proceedings.mlr.press/v139/zintgraf21a.html", "slides": "/media/icml-2021/Slides/10379.pdf", "author_site": "Luisa Zintgraf, Leo Feng, Cong Lu, Maximilian Igl, Kristian Hartikainen, Katja Hofmann, Shimon Whiteson", "author": "Luisa M Zintgraf; Leo Feng; Cong Lu; Maximilian Igl; Kristian Hartikainen; Katja Hofmann; Shimon Whiteson", "abstract": "To rapidly learn a new task, it is often essential for agents to explore efficiently - especially when performance matters from the first timestep. One way to learn such behaviour is via meta-learning. Many existing methods however rely on dense rewards for meta-training, and can fail catastrophically if the rewards are sparse. \tWithout a suitable reward signal, the need for exploration during meta-training is exacerbated. To address this, we propose HyperX, which uses novel reward bonuses for meta-training to explore in approximate hyper-state space (where hyper-states represent the environment state and the agent\u2019s task belief). 
We show empirically that HyperX meta-learns better task-exploration and adapts more successfully to new tasks than existing methods.", "bibtex": "@InProceedings{pmlr-v139-zintgraf21a,\n title = \t {Exploration in Approximate Hyper-State Space for Meta Reinforcement Learning},\n author = {Zintgraf, Luisa M and Feng, Leo and Lu, Cong and Igl, Maximilian and Hartikainen, Kristian and Hofmann, Katja and Whiteson, Shimon},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12991--13001},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zintgraf21a/zintgraf21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zintgraf21a.html},\n abstract = \t {To rapidly learn a new task, it is often essential for agents to explore efficiently - especially when performance matters from the first timestep. One way to learn such behaviour is via meta-learning. Many existing methods however rely on dense rewards for meta-training, and can fail catastrophically if the rewards are sparse. \tWithout a suitable reward signal, the need for exploration during meta-training is exacerbated. To address this, we propose HyperX, which uses novel reward bonuses for meta-training to explore in approximate hyper-state space (where hyper-states represent the environment state and the agent\u2019s task belief). We show empirically that HyperX meta-learns better task-exploration and adapts more successfully to new tasks than existing methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/zintgraf21a/zintgraf21a.pdf", "supp": "", "pdf_size": 2141604, "gs_citation": 63, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=598880115896472356&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "University of Oxford, UK; Mila, Universit\u00e9 de Montr\u00e9al, Canada; University of Oxford, UK; University of Oxford, UK; University of Oxford, UK; Microsoft Research, Cambridge, UK; University of Oxford, UK", "aff_domain": "cs.ox.ac.uk; ; ; ; ; ; ", "email": "cs.ox.ac.uk; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/zintgraf21a.html", "aff_unique_index": "0;1;0;0;0;2;0", "aff_unique_norm": "University of Oxford;Universit\u00e9 de Montr\u00e9al;Microsoft", "aff_unique_dep": ";Mila;Microsoft Research", "aff_unique_url": "https://www.ox.ac.uk;https://www.umontreal.ca;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "Oxford;UM;MSR", "aff_campus_unique_index": "1;2", "aff_campus_unique": ";Montr\u00e9al;Cambridge", "aff_country_unique_index": "0;1;0;0;0;0;0", "aff_country_unique": "United Kingdom;Canada" }, { "title": "Explore Visual Concept Formation for Image Classification", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9891", "id": "9891", "proceeding": "http://proceedings.mlr.press/v139/xiong21a.html", "slides": "", "author_site": "Shengzhou Xiong, Yihua Tan, Guoyou Wang", "author": "Shengzhou Xiong; Yihua Tan; Guoyou Wang", "abstract": "Human beings acquire the ability of image classification through visual concept learning, in which the process of concept formation involves intertwined searches of common properties and concept descriptions. 
However, in most image classification algorithms using deep convolutional neural network (ConvNet), the representation space is constructed under the premise that concept descriptions are fixed as one-hot codes, which limits the mining of properties and the ability of identifying unseen samples. Inspired by this, we propose a learning strategy of visual concept formation (LSOVCF) based on the ConvNet, in which the two intertwined parts of concept formation, i.e. feature extraction and concept description, are learned together. First, LSOVCF takes sample response in the last layer of ConvNet to induct concept description being assumed as Gaussian distribution, which is part of the training process. Second, the exploration and experience loss is designed for optimization, which adopts experience cache pool to speed up convergence. Experiments show that LSOVCF improves the ability of identifying unseen samples on cifar10, STL10, flower17 and ImageNet based on several backbones, from the classic VGG to the SOTA Ghostnet. The code is available at \\url{https://github.com/elvintanhust/LSOVCF}.", "bibtex": "@InProceedings{pmlr-v139-xiong21a,\n title = \t {Explore Visual Concept Formation for Image Classification},\n author = {Xiong, Shengzhou and Tan, Yihua and Wang, Guoyou},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11470--11479},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xiong21a/xiong21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/xiong21a.html},\n abstract = \t {Human beings acquire the ability of image classification through visual concept learning, in which the process of concept formation involves intertwined searches of common properties and concept descriptions. However, in most image classification algorithms using deep convolutional neural network (ConvNet), the representation space is constructed under the premise that concept descriptions are fixed as one-hot codes, which limits the mining of properties and the ability of identifying unseen samples. Inspired by this, we propose a learning strategy of visual concept formation (LSOVCF) based on the ConvNet, in which the two intertwined parts of concept formation, i.e. feature extraction and concept description, are learned together. First, LSOVCF takes sample response in the last layer of ConvNet to induct concept description being assumed as Gaussian distribution, which is part of the training process. Second, the exploration and experience loss is designed for optimization, which adopts experience cache pool to speed up convergence. Experiments show that LSOVCF improves the ability of identifying unseen samples on cifar10, STL10, flower17 and ImageNet based on several backbones, from the classic VGG to the SOTA Ghostnet. 
The code is available at \\url{https://github.com/elvintanhust/LSOVCF}.}\n}", "pdf": "http://proceedings.mlr.press/v139/xiong21a/xiong21a.pdf", "supp": "", "pdf_size": 835263, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8328805790307179012&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "National Key Laboratory of Science & Technology on Multispectral Information Processing, School of Artificial Intelligence and Automation, Huazhong University of Science and Technology, Wuhan 430074, China; National Key Laboratory of Science & Technology on Multispectral Information Processing, School of Artificial Intelligence and Automation, Huazhong University of Science and Technology, Wuhan 430074, China; National Key Laboratory of Science & Technology on Multispectral Information Processing, School of Artificial Intelligence and Automation, Huazhong University of Science and Technology, Wuhan 430074, China", "aff_domain": "hust.edu.cn; ; ", "email": "hust.edu.cn; ; ", "github": "https://github.com/elvintanhust/LSOVCF", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/xiong21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Huazhong University of Science and Technology", "aff_unique_dep": "School of Artificial Intelligence and Automation", "aff_unique_url": "http://www.hust.edu.cn", "aff_unique_abbr": "HUST", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Wuhan", "aff_country_unique_index": "0;0;0", "aff_country_unique": "China" }, { "title": "Exponential Lower Bounds for Batch Reinforcement Learning: Batch RL can be Exponentially Harder than Online RL", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8421", "id": "8421", "proceeding": "http://proceedings.mlr.press/v139/zanette21a.html", "slides": "", "author": "Andrea Zanette", "abstract": "Several practical applications of reinforcement learning involve an agent learning from past data without the possibility of further exploration. Often these applications require us to 1) identify a near optimal policy or to 2) estimate the value of a target policy. For both tasks we derive exponential information-theoretic lower bounds in discounted infinite horizon MDPs with a linear function representation for the action value function even if 1) realizability holds, 2) the batch algorithm observes the exact reward and transition functions, and 3) the batch algorithm is given the best a priori data distribution for the problem class. Our work introduces a new \u2018oracle + batch algorithm\u2019 framework to prove lower bounds that hold for every distribution. 
The work shows an exponential separation between batch and online reinforcement learning.", "bibtex": "@InProceedings{pmlr-v139-zanette21a,\n title = \t {Exponential Lower Bounds for Batch Reinforcement Learning: Batch RL can be Exponentially Harder than Online RL},\n author = {Zanette, Andrea},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12287--12297},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zanette21a/zanette21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zanette21a.html},\n abstract = \t {Several practical applications of reinforcement learning involve an agent learning from past data without the possibility of further exploration. Often these applications require us to 1) identify a near optimal policy or to 2) estimate the value of a target policy. For both tasks we derive exponential information-theoretic lower bounds in discounted infinite horizon MDPs with a linear function representation for the action value function even if 1) realizability holds, 2) the batch algorithm observes the exact reward and transition functions, and 3) the batch algorithm is given the best a priori data distribution for the problem class. Our work introduces a new \u2018oracle + batch algorithm\u2019 framework to prove lower bounds that hold for every distribution. The work shows an exponential separation between batch and online reinforcement learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/zanette21a/zanette21a.pdf", "supp": "", "pdf_size": 560271, "gs_citation": 88, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=38093804890741721&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Institute for Computational and Mathematical Engineering, Stanford University, Stanford, USA", "aff_domain": "stanford.edu", "email": "stanford.edu", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v139/zanette21a.html", "aff_unique_index": "0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Institute for Computational and Mathematical Engineering", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "title": "Exponential Reduction in Sample Complexity with Learning of Ising Model Dynamics", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8691", "id": "8691", "proceeding": "http://proceedings.mlr.press/v139/dutt21a.html", "slides": "", "author_site": "Arkopal Dutt, Andrey Lokhov, Marc Vuffray, Sidhant Misra", "author": "Arkopal Dutt; Andrey Lokhov; Marc D Vuffray; Sidhant Misra", "abstract": "The usual setting for learning the structure and parameters of a graphical model assumes the availability of independent samples produced from the corresponding multivariate probability distribution. However, for many models the mixing time of the respective Markov chain can be very large and i.i.d. samples may not be obtained. We study the problem of reconstructing binary graphical models from correlated samples produced by a dynamical process, which is natural in many applications. 
We analyze the sample complexity of two estimators that are based on the interaction screening objective and the conditional likelihood loss. We observe that for samples coming from a dynamical process far from equilibrium, the sample complexity reduces exponentially compared to a dynamical process that mixes quickly.", "bibtex": "@InProceedings{pmlr-v139-dutt21a,\n title = \t {Exponential Reduction in Sample Complexity with Learning of Ising Model Dynamics},\n author = {Dutt, Arkopal and Lokhov, Andrey and Vuffray, Marc D and Misra, Sidhant},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2914--2925},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/dutt21a/dutt21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/dutt21a.html},\n abstract = \t {The usual setting for learning the structure and parameters of a graphical model assumes the availability of independent samples produced from the corresponding multivariate probability distribution. However, for many models the mixing time of the respective Markov chain can be very large and i.i.d. samples may not be obtained. We study the problem of reconstructing binary graphical models from correlated samples produced by a dynamical process, which is natural in many applications. We analyze the sample complexity of two estimators that are based on the interaction screening objective and the conditional likelihood loss. We observe that for samples coming from a dynamical process far from equilibrium, the sample complexity reduces exponentially compared to a dynamical process that mixes quickly.}\n}", "pdf": "http://proceedings.mlr.press/v139/dutt21a/dutt21a.pdf", "supp": "", "pdf_size": 2486070, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14788105086389586758&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Massachusetts Institute of Technology, Cambridge, MA, USA; Theoretical Division, Los Alamos National Laboratory, Los Alamos, NM, USA; Theoretical Division, Los Alamos National Laboratory, Los Alamos, NM, USA; Theoretical Division, Los Alamos National Laboratory, Los Alamos, NM, USA", "aff_domain": "mit.edu;lanl.gov;lanl.gov;lanl.gov", "email": "mit.edu;lanl.gov;lanl.gov;lanl.gov", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/dutt21a.html", "aff_unique_index": "0;1;1;1", "aff_unique_norm": "Massachusetts Institute of Technology;Los Alamos National Laboratory", "aff_unique_dep": ";Theoretical Division", "aff_unique_url": "https://www.mit.edu;https://www.lanl.gov", "aff_unique_abbr": "MIT;LANL", "aff_campus_unique_index": "0;1;1;1", "aff_campus_unique": "Cambridge;Los Alamos", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Exponentially Many Local Minima in Quantum Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10073", "id": "10073", "proceeding": "http://proceedings.mlr.press/v139/you21c.html", "slides": "", "author_site": "Xuchen You, Xiaodi Wu", "author": "Xuchen You; Xiaodi Wu", "abstract": "Quantum Neural Networks (QNNs), or the so-called variational quantum circuits, are important quantum applications both because of their similar promises as classical neural networks and because of the feasibility of 
their implementation on near-term intermediate-size noisy quantum machines (NISQ). However, the training task of QNNs is challenging and much less understood. We conduct a quantitative investigation on the landscape of loss functions of QNNs and identify a class of simple yet extremely hard QNN instances for training. Specifically, we show for typical under-parameterized QNNs, there exists a dataset that induces a loss function with the number of spurious local minima depending exponentially on the number of parameters. Moreover, we show the optimality of our construction by providing an almost matching upper bound on such dependence. While local minima in classical neural networks are due to non-linear activations, in quantum neural networks local minima appear as a result of the quantum interference phenomenon. Finally, we empirically confirm that our constructions can indeed be hard instances in practice with typical gradient-based optimizers, which demonstrates the practical value of our findings.", "bibtex": "@InProceedings{pmlr-v139-you21c,\n title = \t {Exponentially Many Local Minima in Quantum Neural Networks},\n author = {You, Xuchen and Wu, Xiaodi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12144--12155},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/you21c/you21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/you21c.html},\n abstract = \t {Quantum Neural Networks (QNNs), or the so-called variational quantum circuits, are important quantum applications both because of their similar promises as classical neural networks and because of the feasibility of their implementation on near-term intermediate-size noisy quantum machines (NISQ). However, the training task of QNNs is challenging and much less understood. We conduct a quantitative investigation on the landscape of loss functions of QNNs and identify a class of simple yet extremely hard QNN instances for training. Specifically, we show for typical under-parameterized QNNs, there exists a dataset that induces a loss function with the number of spurious local minima depending exponentially on the number of parameters. Moreover, we show the optimality of our construction by providing an almost matching upper bound on such dependence. While local minima in classical neural networks are due to non-linear activations, in quantum neural networks local minima appear as a result of the quantum interference phenomenon. 
Finally, we empirically confirm that our constructions can indeed be hard instances in practice with typical gradient-based optimizers, which demonstrates the practical value of our findings.}\n}", "pdf": "http://proceedings.mlr.press/v139/you21c/you21c.pdf", "supp": "", "pdf_size": 844706, "gs_citation": 75, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6698383446946477565&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Joint Center for Quantum Information and Computer Science, University of Maryland+Department of Computer Science and Institute for Advanced Computer Studies, University of Maryland; Joint Center for Quantum Information and Computer Science, University of Maryland+Department of Computer Science and Institute for Advanced Computer Studies, University of Maryland", "aff_domain": "umd.edu;cs.umd.edu", "email": "umd.edu;cs.umd.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/you21c.html", "aff_unique_index": "0+0;0+0", "aff_unique_norm": "University of Maryland", "aff_unique_dep": "Joint Center for Quantum Information and Computer Science", "aff_unique_url": "https://www.umd.edu", "aff_unique_abbr": "", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "United States" }, { "title": "Expressive 1-Lipschitz Neural Networks for Robust Multiple Graph Learning against Adversarial Attacks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9793", "id": "9793", "proceeding": "http://proceedings.mlr.press/v139/zhao21e.html", "slides": "", "author_site": "Xin Zhao, Zeru Zhang, Zijie Zhang, Lingfei Wu, Jiayin Jin, Yang Zhou, Ruoming Jin, Dejing Dou, Da Yan", "author": "Xin Zhao; Zeru Zhang; Zijie Zhang; Lingfei Wu; Jiayin Jin; Yang Zhou; Ruoming Jin; Dejing Dou; Da Yan", "abstract": "Recent findings have shown multiple graph learning models, such as graph classification and graph matching, are highly vulnerable to adversarial attacks, i.e. small input perturbations in graph structures and node attributes can cause the model failures. Existing defense techniques often defend specific attacks on particular multiple graph learning tasks. This paper proposes an attack-agnostic graph-adaptive 1-Lipschitz neural network, ERNN, for improving the robustness of deep multiple graph learning while achieving remarkable expressive power. A K_l-Lipschitz Weibull activation function is designed to enforce the gradient norm as K_l at layer l. The nearest matrix orthogonalization and polar decomposition techniques are utilized to constraint the weight norm as 1/K_l and make the norm-constrained weight close to the original weight. The theoretical analysis is conducted to derive lower and upper bounds of feasible K_l under the 1-Lipschitz constraint. 
The combination of norm-constrained weight and activation function leads to the 1-Lipschitz neural network for expressive and robust multiple graph learning.", "bibtex": "@InProceedings{pmlr-v139-zhao21e,\n title = \t {Expressive 1-Lipschitz Neural Networks for Robust Multiple Graph Learning against Adversarial Attacks},\n author = {Zhao, Xin and Zhang, Zeru and Zhang, Zijie and Wu, Lingfei and Jin, Jiayin and Zhou, Yang and Jin, Ruoming and Dou, Dejing and Yan, Da},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12719--12735},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhao21e/zhao21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhao21e.html},\n abstract = \t {Recent findings have shown multiple graph learning models, such as graph classification and graph matching, are highly vulnerable to adversarial attacks, i.e. small input perturbations in graph structures and node attributes can cause the model failures. Existing defense techniques often defend specific attacks on particular multiple graph learning tasks. This paper proposes an attack-agnostic graph-adaptive 1-Lipschitz neural network, ERNN, for improving the robustness of deep multiple graph learning while achieving remarkable expressive power. A K_l-Lipschitz Weibull activation function is designed to enforce the gradient norm as K_l at layer l. The nearest matrix orthogonalization and polar decomposition techniques are utilized to constraint the weight norm as 1/K_l and make the norm-constrained weight close to the original weight. The theoretical analysis is conducted to derive lower and upper bounds of feasible K_l under the 1-Lipschitz constraint. 
The combination of norm-constrained weight and activation function leads to the 1-Lipschitz neural network for expressive and robust multiple graph learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhao21e/zhao21e.pdf", "supp": "", "pdf_size": 638004, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13407308401883474864&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Auburn University, USA+JD.COM Silicon Valley Research Center, USA; Auburn University, USA+JD.COM Silicon Valley Research Center, USA; Auburn University, USA+JD.COM Silicon Valley Research Center, USA; JD.COM Silicon Valley Research Center, USA; Auburn University, USA+JD.COM Silicon Valley Research Center, USA; Auburn University, USA+JD.COM Silicon Valley Research Center, USA; Kent State University, USA; University of Oregon, USA+Baidu Research, China; University of Alabama at Birmingham, USA", "aff_domain": "googol.com;eden.co.uk;auburn.edu; ; ; ; ; ; ", "email": "googol.com;eden.co.uk;auburn.edu; ; ; ; ; ; ", "github": "", "project": "", "author_num": 9, "oa": "https://proceedings.mlr.press/v139/zhao21e.html", "aff_unique_index": "0+1;0+1;0+1;1;0+1;0+1;2;3+4;5", "aff_unique_norm": "Auburn University;JD.com;Kent State University;University of Oregon;Baidu;University of Alabama at Birmingham", "aff_unique_dep": ";Research Center;;;Baidu Research;", "aff_unique_url": "https://www.auburn.edu;https://www.jd.com;https://www.kent.edu;https://www.uoregon.edu;https://research.baidu.com;https://www.uab.edu", "aff_unique_abbr": "Auburn;JD;KSU;UO;Baidu;UAB", "aff_campus_unique_index": "1;1;1;1;1;1;;2", "aff_campus_unique": ";Silicon Valley;Birmingham", "aff_country_unique_index": "0+0;0+0;0+0;0;0+0;0+0;0;0+1;0", "aff_country_unique": "United States;China" }, { "title": "FILTRA: Rethinking Steerable CNN by Filter Transform", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8859", "id": "8859", "proceeding": "http://proceedings.mlr.press/v139/li21v.html", "slides": "/media/icml-2021/Slides/8859.pdf", "author_site": "Bo Li, Qili Wang, Gim Hee Lee", "author": "Bo Li; Qili Wang; Gim Hee Lee", "abstract": "Steerable CNN imposes the prior knowledge of transformation invariance or equivariance in the network architecture to enhance the network robustness on geometry transformation of data and reduce overfitting. It has been an intuitive and widely used technique to construct a steerable filter by augmenting a filter with its transformed copies in the past decades, which is named as filter transform in this paper. Recently, the problem of steerable CNN has been studied from the aspect of group representation theory, which reveals the function space structure of a steerable kernel function. However, it is not yet clear on how this theory is related to the filter transform technique. In this paper, we show that kernel constructed by filter transform can also be interpreted in the group representation theory. This interpretation helps complete the puzzle of steerable CNN theory and provides a novel and simple approach to implement steerable convolution operators. 
Experiments are executed on multiple datasets to verify the feasibility of the proposed approach.", "bibtex": "@InProceedings{pmlr-v139-li21v,\n title = \t {FILTRA: Rethinking Steerable CNN by Filter Transform},\n author = {Li, Bo and Wang, Qili and Lee, Gim Hee},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6515--6522},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21v/li21v.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21v.html},\n abstract = \t {Steerable CNN imposes the prior knowledge of transformation invariance or equivariance in the network architecture to enhance the network robustness on geometry transformation of data and reduce overfitting. It has been an intuitive and widely used technique to construct a steerable filter by augmenting a filter with its transformed copies in the past decades, which is named as filter transform in this paper. Recently, the problem of steerable CNN has been studied from the aspect of group representation theory, which reveals the function space structure of a steerable kernel function. However, it is not yet clear on how this theory is related to the filter transform technique. In this paper, we show that kernel constructed by filter transform can also be interpreted in the group representation theory. This interpretation helps complete the puzzle of steerable CNN theory and provides a novel and simple approach to implement steerable convolution operators. Experiments are executed on multiple datasets to verify the feasibility of the proposed approach.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21v/li21v.pdf", "supp": "", "pdf_size": 411722, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12773800134537729615&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "JD Technology; JD Technology; National University of Singapore", "aff_domain": "gmail.com; ; ", "email": "gmail.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/li21v.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "JD;National University of Singapore", "aff_unique_dep": "JD Technology;", "aff_unique_url": "https://www.jd.com;https://www.nus.edu.sg", "aff_unique_abbr": "JD;NUS", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;1", "aff_country_unique": "China;Singapore" }, { "title": "FL-NTK: A Neural Tangent Kernel-based Framework for Federated Learning Analysis", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10617", "id": "10617", "proceeding": "http://proceedings.mlr.press/v139/huang21c.html", "slides": "", "author_site": "Baihe Huang, Xiaoxiao Li, Zhao Song, Xin Yang", "author": "Baihe Huang; Xiaoxiao Li; Zhao Song; Xin Yang", "abstract": "Federated Learning (FL) is an emerging learning scheme that allows different distributed clients to train deep neural networks together without data sharing. Neural networks have become popular due to their unprecedented success. To the best of our knowledge, the theoretical guarantees of FL concerning neural networks with explicit forms and multi-step updates are unexplored. 
Nevertheless, training analysis of neural networks in FL is non-trivial for two reasons: first, the objective loss function we are optimizing is non-smooth and non-convex, and second, we are not even updating in the gradient direction. Existing convergence results for gradient descent-based methods heavily rely on the fact that the gradient direction is used for updating. The current paper presents a new class of convergence analysis for FL, Federated Neural Tangent Kernel (FL-NTK), which corresponds to overparameterized ReLU neural networks trained by gradient descent in FL and is inspired by the analysis in Neural Tangent Kernel (NTK). Theoretically, FL-NTK converges to a global-optimal solution at a linear rate with properly tuned learning parameters. Furthermore, with proper distributional assumptions, FL-NTK can also achieve good generalization. The proposed theoretical analysis scheme can be generalized to more complex neural networks.", "bibtex": "@InProceedings{pmlr-v139-huang21c,\n title = \t {FL-NTK: A Neural Tangent Kernel-based Framework for Federated Learning Analysis},\n author = {Huang, Baihe and Li, Xiaoxiao and Song, Zhao and Yang, Xin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4423--4434},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/huang21c/huang21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/huang21c.html},\n abstract = \t {Federated Learning (FL) is an emerging learning scheme that allows different distributed clients to train deep neural networks together without data sharing. Neural networks have become popular due to their unprecedented success. To the best of our knowledge, the theoretical guarantees of FL concerning neural networks with explicit forms and multi-step updates are unexplored. Nevertheless, training analysis of neural networks in FL is non-trivial for two reasons: first, the objective loss function we are optimizing is non-smooth and non-convex, and second, we are not even updating in the gradient direction. Existing convergence results for gradient descent-based methods heavily rely on the fact that the gradient direction is used for updating. The current paper presents a new class of convergence analysis for FL, Federated Neural Tangent Kernel (FL-NTK), which corresponds to overparameterized ReLU neural networks trained by gradient descent in FL and is inspired by the analysis in Neural Tangent Kernel (NTK). Theoretically, FL-NTK converges to a global-optimal solution at a linear rate with properly tuned learning parameters. Furthermore, with proper distributional assumptions, FL-NTK can also achieve good generalization. 
The proposed theoretical analysis scheme can be generalized to more complex neural networks.}\n}", "pdf": "http://proceedings.mlr.press/v139/huang21c/huang21c.pdf", "supp": "", "pdf_size": 371624, "gs_citation": 77, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14159667124764862822&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "Peking University, Beijing, China; The University of British Colombia, Vancouver, BC, Canada; Institute for Advanced Study, Princeton, NJ, United States; The University of Washington, Seattle, WA, United States", "aff_domain": "pku.edu.cn;aya.yale.edu;gmail.com;cs.washington.edu", "email": "pku.edu.cn;aya.yale.edu;gmail.com;cs.washington.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/huang21c.html", "aff_unique_index": "0;1;2;3", "aff_unique_norm": "Peking University;University of British Columbia;Institute for Advanced Study;University of Washington", "aff_unique_dep": ";;;", "aff_unique_url": "http://www.pku.edu.cn;https://www.ubc.ca;https://www.ias.edu;https://www.washington.edu", "aff_unique_abbr": "Peking U;UBC;IAS;UW", "aff_campus_unique_index": "0;1;2;3", "aff_campus_unique": "Beijing;Vancouver;Princeton;Seattle", "aff_country_unique_index": "0;1;2;2", "aff_country_unique": "China;Canada;United States" }, { "title": "FOP: Factorizing Optimal Joint Policy of Maximum-Entropy Multi-Agent Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9269", "id": "9269", "proceeding": "http://proceedings.mlr.press/v139/zhang21m.html", "slides": "", "author_site": "Tianhao Zhang, yueheng li, Chen Wang, Guangming Xie, Zongqing Lu", "author": "Tianhao Zhang; Yueheng Li; Chen Wang; Guangming Xie; Zongqing Lu", "abstract": "Value decomposition recently injects vigorous vitality into multi-agent actor-critic methods. However, existing decomposed actor-critic methods cannot guarantee the convergence of global optimum. In this paper, we present a novel multi-agent actor-critic method, FOP, which can factorize the optimal joint policy induced by maximum-entropy multi-agent reinforcement learning (MARL) into individual policies. Theoretically, we prove that factorized individual policies of FOP converge to the global optimum. Empirically, in the well-known matrix game and differential game, we verify that FOP can converge to the global optimum for both discrete and continuous action spaces. We also evaluate FOP on a set of StarCraft II micromanagement tasks, and demonstrate that FOP substantially outperforms state-of-the-art decomposed value-based and actor-critic methods.", "bibtex": "@InProceedings{pmlr-v139-zhang21m,\n title = \t {FOP: Factorizing Optimal Joint Policy of Maximum-Entropy Multi-Agent Reinforcement Learning},\n author = {Zhang, Tianhao and Li, Yueheng and Wang, Chen and Xie, Guangming and Lu, Zongqing},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12491--12500},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21m/zhang21m.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21m.html},\n abstract = \t {Value decomposition recently injects vigorous vitality into multi-agent actor-critic methods. 
However, existing decomposed actor-critic methods cannot guarantee the convergence of global optimum. In this paper, we present a novel multi-agent actor-critic method, FOP, which can factorize the optimal joint policy induced by maximum-entropy multi-agent reinforcement learning (MARL) into individual policies. Theoretically, we prove that factorized individual policies of FOP converge to the global optimum. Empirically, in the well-known matrix game and differential game, we verify that FOP can converge to the global optimum for both discrete and continuous action spaces. We also evaluate FOP on a set of StarCraft II micromanagement tasks, and demonstrate that FOP substantially outperforms state-of-the-art decomposed value-based and actor-critic methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21m/zhang21m.pdf", "supp": "", "pdf_size": 1928222, "gs_citation": 102, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1476248779230232018&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "Peking University; Peking University; Peking University; Peking University; Peking University", "aff_domain": "pku.edu.cn;pku.edu.cn; ; ; ", "email": "pku.edu.cn;pku.edu.cn; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/zhang21m.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Peking University", "aff_unique_dep": "", "aff_unique_url": "http://www.pku.edu.cn", "aff_unique_abbr": "Peking U", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "China" }, { "title": "Factor-analytic inverse regression for high-dimension, small-sample dimensionality reduction", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8695", "id": "8695", "proceeding": "http://proceedings.mlr.press/v139/jha21b.html", "slides": "/media/icml-2021/Slides/8695.pdf", "author_site": "Aditi Jha, Michael J. Morais, Jonathan Pillow", "author": "Aditi Jha; Michael J. Morais; Jonathan W Pillow", "abstract": "Sufficient dimension reduction (SDR) methods are a family of supervised methods for dimensionality reduction that seek to reduce dimensionality while preserving information about a target variable of interest. However, existing SDR methods typically require more observations than the number of dimensions ($N > p$). To overcome this limitation, we propose Class-conditional Factor Analytic Dimensions (CFAD), a model-based dimensionality reduction method for high-dimensional, small-sample data. We show that CFAD substantially outperforms existing SDR methods in the small-sample regime, and can be extended to incorporate prior information such as smoothness in the projection axes. We demonstrate the effectiveness of CFAD with an application to functional magnetic resonance imaging (fMRI) measurements during visual object recognition and working memory tasks, where it outperforms existing SDR and a variety of other dimensionality-reduction methods.", "bibtex": "@InProceedings{pmlr-v139-jha21b,\n title = \t {Factor-analytic inverse regression for high-dimension, small-sample dimensionality reduction},\n author = {Jha, Aditi and Morais, Michael J. 
and Pillow, Jonathan W},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4850--4859},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jha21b/jha21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/jha21b.html},\n abstract = \t {Sufficient dimension reduction (SDR) methods are a family of supervised methods for dimensionality reduction that seek to reduce dimensionality while preserving information about a target variable of interest. However, existing SDR methods typically require more observations than the number of dimensions ($N > p$). To overcome this limitation, we propose Class-conditional Factor Analytic Dimensions (CFAD), a model-based dimensionality reduction method for high-dimensional, small-sample data. We show that CFAD substantially outperforms existing SDR methods in the small-sample regime, and can be extended to incorporate prior information such as smoothness in the projection axes. We demonstrate the effectiveness of CFAD with an application to functional magnetic resonance imaging (fMRI) measurements during visual object recognition and working memory tasks, where it outperforms existing SDR and a variety of other dimensionality-reduction methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/jha21b/jha21b.pdf", "supp": "", "pdf_size": 5649420, "gs_citation": 2, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1176468731536810405&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Princeton Neuroscience Institute, Princeton University, NJ, USA+Department of Electrical and Computer Engineering, Princeton University, NJ, USA; Princeton Neuroscience Institute, Princeton University, NJ, USA; Department of Psychology, Princeton University, NJ, USA", "aff_domain": "princeton.edu; ; ", "email": "princeton.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/jha21b.html", "aff_unique_index": "0+0;0;0", "aff_unique_norm": "Princeton University", "aff_unique_dep": "Princeton Neuroscience Institute", "aff_unique_url": "https://www.princeton.edu", "aff_unique_abbr": "Princeton", "aff_campus_unique_index": "0+0;0", "aff_campus_unique": "Princeton;", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "United States" }, { "title": "Fair Classification with Noisy Protected Attributes: A Framework with Provable Guarantees", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10551", "id": "10551", "proceeding": "http://proceedings.mlr.press/v139/celis21a.html", "slides": "/media/icml-2021/Slides/10551.pdf", "author_site": "L. Elisa Celis, Lingxiao Huang, Vijay Keswani, Nisheeth K. Vishnoi", "author": "L. Elisa Celis; Lingxiao Huang; Vijay Keswani; Nisheeth K. Vishnoi", "abstract": "We present an optimization framework for learning a fair classifier in the presence of noisy perturbations in the protected attributes. Compared to prior work, our framework can be employed with a very general class of linear and linear-fractional fairness constraints, can handle multiple, non-binary protected attributes, and outputs a classifier that comes with provable guarantees on both accuracy and fairness. 
Empirically, we show that our framework can be used to attain either statistical rate or false positive rate fairness guarantees with a minimal loss in accuracy, even when the noise is large, in two real-world datasets.", "bibtex": "@InProceedings{pmlr-v139-celis21a,\n title = \t {Fair Classification with Noisy Protected Attributes: A Framework with Provable Guarantees},\n author = {Celis, L. Elisa and Huang, Lingxiao and Keswani, Vijay and Vishnoi, Nisheeth K.},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1349--1361},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/celis21a/celis21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/celis21a.html},\n abstract = \t {We present an optimization framework for learning a fair classifier in the presence of noisy perturbations in the protected attributes. Compared to prior work, our framework can be employed with a very general class of linear and linear-fractional fairness constraints, can handle multiple, non-binary protected attributes, and outputs a classifier that comes with provable guarantees on both accuracy and fairness. Empirically, we show that our framework can be used to attain either statistical rate or false positive rate fairness guarantees with a minimal loss in accuracy, even when the noise is large, in two real-world datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/celis21a/celis21a.pdf", "supp": "", "pdf_size": 759193, "gs_citation": 72, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4690138498612802428&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Statistics and Data Science, Yale University, USA; Tsinghua University, China; Department of Statistics and Data Science, Yale University, USA; Department of Computer Science, Yale University, USA", "aff_domain": "yale.edu; ; ; ", "email": "yale.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/celis21a.html", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "Yale University;Tsinghua University", "aff_unique_dep": "Department of Statistics and Data Science;", "aff_unique_url": "https://www.yale.edu;https://www.tsinghua.edu.cn", "aff_unique_abbr": "Yale;THU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0;0", "aff_country_unique": "United States;China" }, { "title": "Fair Selective Classification Via Sufficiency", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9311", "id": "9311", "proceeding": "http://proceedings.mlr.press/v139/lee21b.html", "slides": "", "author_site": "Joshua Lee, Yuheng Bu, Deepta Rajan, Prasanna Sattigeri, Rameswar Panda, Subhro Das, Gregory Wornell", "author": "Joshua K Lee; Yuheng Bu; Deepta Rajan; Prasanna Sattigeri; Rameswar Panda; Subhro Das; Gregory W Wornell", "abstract": "Selective classification is a powerful tool for decision-making in scenarios where mistakes are costly but abstentions are allowed. In general, by allowing a classifier to abstain, one can improve the performance of a model at the cost of reducing coverage and classifying fewer samples. 
However, recent work has shown, in some cases, that selective classification can magnify disparities between groups, and has illustrated this phenomenon on multiple real-world datasets. We prove that the sufficiency criterion can be used to mitigate these disparities by ensuring that selective classification increases performance on all groups, and introduce a method for mitigating the disparity in precision across the entire coverage scale based on this criterion. We then provide an upper bound on the conditional mutual information between the class label and sensitive attribute, conditioned on the learned features, which can be used as a regularizer to achieve fairer selective classification. The effectiveness of the method is demonstrated on the Adult, CelebA, Civil Comments, and CheXpert datasets.", "bibtex": "@InProceedings{pmlr-v139-lee21b,\n title = \t {Fair Selective Classification Via Sufficiency},\n author = {Lee, Joshua K and Bu, Yuheng and Rajan, Deepta and Sattigeri, Prasanna and Panda, Rameswar and Das, Subhro and Wornell, Gregory W},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6076--6086},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lee21b/lee21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/lee21b.html},\n abstract = \t {Selective classification is a powerful tool for decision-making in scenarios where mistakes are costly but abstentions are allowed. In general, by allowing a classifier to abstain, one can improve the performance of a model at the cost of reducing coverage and classifying fewer samples. However, recent work has shown, in some cases, that selective classification can magnify disparities between groups, and has illustrated this phenomenon on multiple real-world datasets. We prove that the sufficiency criterion can be used to mitigate these disparities by ensuring that selective classification increases performance on all groups, and introduce a method for mitigating the disparity in precision across the entire coverage scale based on this criterion. We then provide an upper bound on the conditional mutual information between the class label and sensitive attribute, conditioned on the learned features, which can be used as a regularizer to achieve fairer selective classification. 
The effectiveness of the method is demonstrated on the Adult, CelebA, Civil Comments, and CheXpert datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/lee21b/lee21b.pdf", "supp": "", "pdf_size": 1003966, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11234225765483961486&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Electrical Engineering and Computer Science, Massachusetts Institute of Technology, Cambridge, USA+MIT-IBM Watson AI Lab, IBM Research, Cambridge, USA; Department of Electrical Engineering and Computer Science, Massachusetts Institute of Technology, Cambridge, USA+MIT-IBM Watson AI Lab, IBM Research, Cambridge, USA; MIT-IBM Watson AI Lab, IBM Research, Cambridge, USA; MIT-IBM Watson AI Lab, IBM Research, Cambridge, USA; MIT-IBM Watson AI Lab, IBM Research, Cambridge, USA; MIT-IBM Watson AI Lab, IBM Research, Cambridge, USA; Department of Electrical Engineering and Computer Science, Massachusetts Institute of Technology, Cambridge, USA", "aff_domain": "mit.edu;hotmail.com;ibm.com;ibm.com;ibm.com;ibm.com;mit.edu", "email": "mit.edu;hotmail.com;ibm.com;ibm.com;ibm.com;ibm.com;mit.edu", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/lee21b.html", "aff_unique_index": "0+1;0+1;1;1;1;1;0", "aff_unique_norm": "Massachusetts Institute of Technology;IBM", "aff_unique_dep": "Department of Electrical Engineering and Computer Science;AI Lab", "aff_unique_url": "https://web.mit.edu;https://www.ibmwatsonai.org/", "aff_unique_abbr": "MIT;MIT-IBM AI Lab", "aff_campus_unique_index": "0+0;0+0;0;0;0;0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0+0;0+0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Fairness and Bias in Online Selection", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10113", "id": "10113", "proceeding": "http://proceedings.mlr.press/v139/correa21a.html", "slides": "/media/icml-2021/Slides/10113.pdf", "author_site": "Jose Correa, Andres Cristi, Paul Duetting, Ashkan Norouzi-Fard", "author": "Jose Correa; Andres Cristi; Paul Duetting; Ashkan Norouzi-Fard", "abstract": "There is growing awareness and concern about fairness in machine learning and algorithm design. This is particularly true in online selection problems where decisions are often biased, for example, when assessing credit risks or hiring staff. We address the issues of fairness and bias in online selection by introducing multi-color versions of the classic secretary and prophet problem. Interestingly, existing algorithms for these problems are either very unfair or very inefficient, so we develop optimal fair algorithms for these new problems and provide tight bounds on their competitiveness. 
We validate our theoretical findings on real-world data.", "bibtex": "@InProceedings{pmlr-v139-correa21a,\n title = \t {Fairness and Bias in Online Selection},\n author = {Correa, Jose and Cristi, Andres and Duetting, Paul and Norouzi-Fard, Ashkan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2112--2121},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/correa21a/correa21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/correa21a.html},\n abstract = \t {There is growing awareness and concern about fairness in machine learning and algorithm design. This is particularly true in online selection problems where decisions are often biased, for example, when assessing credit risks or hiring staff. We address the issues of fairness and bias in online selection by introducing multi-color versions of the classic secretary and prophet problem. Interestingly, existing algorithms for these problems are either very unfair or very inefficient, so we develop optimal fair algorithms for these new problems and provide tight bounds on their competitiveness. We validate our theoretical findings on real-world data.}\n}", "pdf": "http://proceedings.mlr.press/v139/correa21a/correa21a.pdf", "supp": "", "pdf_size": 620290, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5703771958854114427&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of Industrial Engineering, Universidad de Chile, Santiago, Chile; Department of Industrial Engineering, Universidad de Chile, Santiago, Chile; Google Research, Z\u00fcrich, Switzerland; Google Research, Z\u00fcrich, Switzerland", "aff_domain": "ing.uchile.cl;ing.uchile.cl;google.com;google.com", "email": "ing.uchile.cl;ing.uchile.cl;google.com;google.com", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/correa21a.html", "aff_unique_index": "0;0;1;1", "aff_unique_norm": "Universidad de Chile;Google", "aff_unique_dep": "Department of Industrial Engineering;Google Research", "aff_unique_url": "https://www.uchile.cl;https://research.google", "aff_unique_abbr": "UCH;Google Research", "aff_campus_unique_index": "0;0;1;1", "aff_campus_unique": "Santiago;Z\u00fcrich", "aff_country_unique_index": "0;0;1;1", "aff_country_unique": "Chile;Switzerland" }, { "title": "Fairness for Image Generation with Uncertain Sensitive Attributes", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8877", "id": "8877", "proceeding": "http://proceedings.mlr.press/v139/jalal21b.html", "slides": "", "author_site": "Ajil Jalal, Sushrut Karmalkar, Jessica Hoffmann, Alexandros Dimakis, Eric Price", "author": "Ajil Jalal; Sushrut Karmalkar; Jessica Hoffmann; Alex Dimakis; Eric Price", "abstract": "This work tackles the issue of fairness in the context of generative procedures, such as image super-resolution, which entail different definitions from the standard classification setting. Moreover, while traditional group fairness definitions are typically defined with respect to specified protected groups \u2013 camouflaging the fact that these groupings are artificial and carry historical and political motivations \u2013 we emphasize that there are no ground truth identities. 
For instance, should South and East Asians be viewed as a single group or separate groups? Should we consider one race as a whole or further split by gender? Choosing which groups are valid and who belongs in them is an impossible dilemma and being \u201cfair\u201d with respect to Asians may require being \u201cunfair\u201d with respect to South Asians. This motivates the introduction of definitions that allow algorithms to be \\emph{oblivious} to the relevant groupings. We define several intuitive notions of group fairness and study their incompatibilities and trade-offs. We show that the natural extension of demographic parity is strongly dependent on the grouping, and \\emph{impossible} to achieve obliviously. On the other hand, the conceptually new definition we introduce, Conditional Proportional Representation, can be achieved obliviously through Posterior Sampling. Our experiments validate our theoretical results and achieve fair image reconstruction using state-of-the-art generative models.", "bibtex": "@InProceedings{pmlr-v139-jalal21b,\n title = \t {Fairness for Image Generation with Uncertain Sensitive Attributes},\n author = {Jalal, Ajil and Karmalkar, Sushrut and Hoffmann, Jessica and Dimakis, Alex and Price, Eric},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4721--4732},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jalal21b/jalal21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/jalal21b.html},\n abstract = \t {This work tackles the issue of fairness in the context of generative procedures, such as image super-resolution, which entail different definitions from the standard classification setting. Moreover, while traditional group fairness definitions are typically defined with respect to specified protected groups \u2013 camouflaging the fact that these groupings are artificial and carry historical and political motivations \u2013 we emphasize that there are no ground truth identities. For instance, should South and East Asians be viewed as a single group or separate groups? Should we consider one race as a whole or further split by gender? Choosing which groups are valid and who belongs in them is an impossible dilemma and being \u201cfair\u201d with respect to Asians may require being \u201cunfair\u201d with respect to South Asians. This motivates the introduction of definitions that allow algorithms to be \\emph{oblivious} to the relevant groupings. We define several intuitive notions of group fairness and study their incompatibilities and trade-offs. We show that the natural extension of demographic parity is strongly dependent on the grouping, and \\emph{impossible} to achieve obliviously. On the other hand, the conceptually new definition we introduce, Conditional Proportional Representation, can be achieved obliviously through Posterior Sampling. 
Our experiments validate our theoretical results and achieve fair image reconstruction using state-of-the-art generative models.}\n}", "pdf": "http://proceedings.mlr.press/v139/jalal21b/jalal21b.pdf", "supp": "", "pdf_size": 5634969, "gs_citation": 54, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8101927413528099299&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Electrical and Computer Engineering, The University of Texas at Austin; Department of Computer Science, The University of Texas at Austin; Department of Computer Science, The University of Texas at Austin; Department of Electrical and Computer Engineering, The University of Texas at Austin; Department of Computer Science, The University of Texas at Austin", "aff_domain": "utexas.edu; ; ; ; ", "email": "utexas.edu; ; ; ; ", "github": "https://github.com/ajiljalal/code-cs-fairness", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/jalal21b.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "University of Texas at Austin", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.utexas.edu", "aff_unique_abbr": "UT Austin", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Austin", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Fairness of Exposure in Stochastic Bandits", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9431", "id": "9431", "proceeding": "http://proceedings.mlr.press/v139/wang21b.html", "slides": "", "author_site": "Luke Lequn Wang, Yiwei Bai, Wen Sun, Thorsten Joachims", "author": "Lequn Wang; Yiwei Bai; Wen Sun; Thorsten Joachims", "abstract": "Contextual bandit algorithms have become widely used for recommendation in online systems (e.g. marketplaces, music streaming, news), where they now wield substantial influence on which items get shown to users. This raises questions of fairness to the items \u2014 and to the sellers, artists, and writers that benefit from this exposure. We argue that the conventional bandit formulation can lead to an undesirable and unfair winner-takes-all allocation of exposure. To remedy this problem, we propose a new bandit objective that guarantees merit-based fairness of exposure to the items while optimizing utility to the users. We formulate fairness regret and reward regret in this setting and present algorithms for both stochastic multi-armed bandits and stochastic linear bandits. We prove that the algorithms achieve sublinear fairness regret and reward regret. Beyond the theoretical analysis, we also provide empirical evidence that these algorithms can allocate exposure to different arms effectively.", "bibtex": "@InProceedings{pmlr-v139-wang21b,\n title = \t {Fairness of Exposure in Stochastic Bandits},\n author = {Wang, Lequn and Bai, Yiwei and Sun, Wen and Joachims, Thorsten},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10686--10696},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21b/wang21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21b.html},\n abstract = \t {Contextual bandit algorithms have become widely used for recommendation in online systems (e.g. 
marketplaces, music streaming, news), where they now wield substantial influence on which items get shown to users. This raises questions of fairness to the items \u2014 and to the sellers, artists, and writers that benefit from this exposure. We argue that the conventional bandit formulation can lead to an undesirable and unfair winner-takes-all allocation of exposure. To remedy this problem, we propose a new bandit objective that guarantees merit-based fairness of exposure to the items while optimizing utility to the users. We formulate fairness regret and reward regret in this setting and present algorithms for both stochastic multi-armed bandits and stochastic linear bandits. We prove that the algorithms achieve sublinear fairness regret and reward regret. Beyond the theoretical analysis, we also provide empirical evidence that these algorithms can allocate exposure to different arms effectively.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21b/wang21b.pdf", "supp": "", "pdf_size": 972395, "gs_citation": 65, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12530794690153369259&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science, Cornell University, Ithaca, NY, USA; Department of Computer Science, Cornell University, Ithaca, NY, USA; Department of Computer Science, Cornell University, Ithaca, NY, USA; Department of Computer Science, Cornell University, Ithaca, NY, USA", "aff_domain": "cornell.edu; ;cornell.edu;cs.cornell.edu", "email": "cornell.edu; ;cornell.edu;cs.cornell.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/wang21b.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Cornell University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.cornell.edu", "aff_unique_abbr": "Cornell", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Ithaca", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Fast Algorithms for Stackelberg Prediction Game with Least Squares Loss", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8495", "id": "8495", "proceeding": "http://proceedings.mlr.press/v139/wang21d.html", "slides": "", "author_site": "Jiali Wang, He Chen, Rujun Jiang, Xudong Li, Zihao Li", "author": "Jiali Wang; He Chen; Rujun Jiang; Xudong Li; Zihao Li", "abstract": "The Stackelberg prediction game (SPG) has been extensively used to model the interactions between the learner and data provider in the training process of various machine learning algorithms. Particularly, SPGs played prominent roles in cybersecurity applications, such as intrusion detection, banking fraud detection, spam filtering, and malware detection. Often formulated as NP-hard bi-level optimization problems, it is generally computationally intractable to find global solutions to SPGs. As an interesting progress in this area, a special class of SPGs with the least squares loss (SPG-LS) have recently been shown polynomially solvable by a bisection method. However, in each iteration of this method, a semidefinite program (SDP) needs to be solved. The resulted high computational costs prevent its applications for large-scale problems. In contrast, we propose a novel approach that reformulates a SPG-LS as a single SDP of a similar form and the same dimension as those solved in the bisection method. 
Our SDP reformulation is, evidenced by our numerical experiments, orders of magnitude faster than the existing bisection method. We further show that the obtained SDP can be reduced to a second order cone program (SOCP). This allows us to provide real-time response to large-scale SPG-LS problems. Numerical results on both synthetic and real world datasets indicate that the proposed SOCP method is up to 20,000+ times faster than the state of the art.", "bibtex": "@InProceedings{pmlr-v139-wang21d,\n title = \t {Fast Algorithms for Stackelberg Prediction Game with Least Squares Loss},\n author = {Wang, Jiali and Chen, He and Jiang, Rujun and Li, Xudong and Li, Zihao},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10708--10716},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21d/wang21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21d.html},\n abstract = \t {The Stackelberg prediction game (SPG) has been extensively used to model the interactions between the learner and data provider in the training process of various machine learning algorithms. Particularly, SPGs played prominent roles in cybersecurity applications, such as intrusion detection, banking fraud detection, spam filtering, and malware detection. Often formulated as NP-hard bi-level optimization problems, it is generally computationally intractable to find global solutions to SPGs. As an interesting progress in this area, a special class of SPGs with the least squares loss (SPG-LS) have recently been shown polynomially solvable by a bisection method. However, in each iteration of this method, a semidefinite program (SDP) needs to be solved. The resulted high computational costs prevent its applications for large-scale problems. In contrast, we propose a novel approach that reformulates a SPG-LS as a single SDP of a similar form and the same dimension as those solved in the bisection method. Our SDP reformulation is, evidenced by our numerical experiments, orders of magnitude faster than the existing bisection method. We further show that the obtained SDP can be reduced to a second order cone program (SOCP). This allows us to provide real-time response to large-scale SPG-LS problems. 
Numerical results on both synthetic and real world datasets indicate that the proposed SOCP method is up to 20,000+ times faster than the state of the art.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21d/wang21d.pdf", "supp": "", "pdf_size": 462536, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2550353303659094230&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "School of Data Science, Fudan University, China; School of Mathematical Sciences, Fudan University, China; School of Data Science, Fudan University, China; School of Data Science, Fudan University, China; School of Mathematical Sciences, Fudan University, China", "aff_domain": "fudan.edu.cn; ;fudan.edu.cn; ; ", "email": "fudan.edu.cn; ;fudan.edu.cn; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/wang21d.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Fudan University", "aff_unique_dep": "School of Data Science", "aff_unique_url": "https://www.fudan.edu.cn", "aff_unique_abbr": "Fudan", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "China" }, { "title": "Fast Projection Onto Convex Smooth Constraints", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9373", "id": "9373", "proceeding": "http://proceedings.mlr.press/v139/usmanova21a.html", "slides": "/media/icml-2021/Slides/9373.pdf", "author_site": "Ilnura Usmanova, Maryam Kamgarpour, Andreas Krause, Kfir Levy", "author": "Ilnura Usmanova; Maryam Kamgarpour; Andreas Krause; Kfir Levy", "abstract": "The Euclidean projection onto a convex set is an important problem that arises in numerous constrained optimization tasks. Unfortunately, in many cases, computing projections is computationally demanding. In this work, we focus on projection problems where the constraints are smooth and the number of constraints is significantly smaller than the dimension. The runtime of existing approaches to solving such problems is either cubic in the dimension or polynomial in the inverse of the target accuracy. Conversely, we propose a simple and efficient primal-dual approach, with a runtime that scales only linearly with the dimension, and only logarithmically in the inverse of the target accuracy. We empirically demonstrate its performance, and compare it with standard baselines.", "bibtex": "@InProceedings{pmlr-v139-usmanova21a,\n title = \t {Fast Projection Onto Convex Smooth Constraints},\n author = {Usmanova, Ilnura and Kamgarpour, Maryam and Krause, Andreas and Levy, Kfir},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10476--10486},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/usmanova21a/usmanova21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/usmanova21a.html},\n abstract = \t {The Euclidean projection onto a convex set is an important problem that arises in numerous constrained optimization tasks. Unfortunately, in many cases, computing projections is computationally demanding. In this work, we focus on projection problems where the constraints are smooth and the number of constraints is significantly smaller than the dimension. 
The runtime of existing approaches to solving such problems is either cubic in the dimension or polynomial in the inverse of the target accuracy. Conversely, we propose a simple and efficient primal-dual approach, with a runtime that scales only linearly with the dimension, and only logarithmically in the inverse of the target accuracy. We empirically demonstrate its performance, and compare it with standard baselines.}\n}", "pdf": "http://proceedings.mlr.press/v139/usmanova21a/usmanova21a.pdf", "supp": "", "pdf_size": 378508, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3937499290494012159&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Automatic Control Laboratory, D-ITET, ETH Z\u00fcrich, Switzerland+Department of Electrical & Computer Engineering, Technion - Israel Institute of Technology; Department of Electrical and Computer Engineering, University of British Columbia, Canada; Department of Computer Science, ETH Z\u00fcrich, Switzerland; A Viterby fellow+Department of Electrical & Computer Engineering, Technion - Israel Institute of Technology", "aff_domain": "control.ee.ethz.ch; ; ; ", "email": "control.ee.ethz.ch; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/usmanova21a.html", "aff_unique_index": "0+1;2;0;1", "aff_unique_norm": "ETH Zurich;Technion - Israel Institute of Technology;University of British Columbia;", "aff_unique_dep": "Automatic Control Laboratory, D-ITET;Department of Electrical & Computer Engineering;Department of Electrical and Computer Engineering;", "aff_unique_url": "https://www.ethz.ch;https://www.technion.ac.il;https://www.ubc.ca;", "aff_unique_abbr": "ETHZ;Technion;UBC;", "aff_campus_unique_index": "0;", "aff_campus_unique": "Z\u00fcrich;", "aff_country_unique_index": "0+1;2;0;1", "aff_country_unique": "Switzerland;Israel;Canada;" }, { "title": "Fast Sketching of Polynomial Kernels of Polynomial Degree", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9109", "id": "9109", "proceeding": "http://proceedings.mlr.press/v139/song21c.html", "slides": "/media/icml-2021/Slides/9109.pdf", "author_site": "Zhao Song, David Woodruff, Zheng Yu, Lichen Zhang", "author": "Zhao Song; David Woodruff; Zheng Yu; Lichen Zhang", "abstract": "Kernel methods are fundamental in machine learning, and faster algorithms for kernel approximation provide direct speedups for many core tasks in machine learning. The polynomial kernel is especially important as other kernels can often be approximated by the polynomial kernel via a Taylor series expansion. Recent techniques in oblivious sketching reduce the dependence in the running time on the degree $q$ of the polynomial kernel from exponential to polynomial, which is useful for the Gaussian kernel, for which $q$ can be chosen to be polylogarithmic. However, for more slowly growing kernels, such as the neural tangent and arc cosine kernels, $q$ needs to be polynomial, and previous work incurs a polynomial factor slowdown in the running time. We give a new oblivious sketch which greatly improves upon this running time, by removing the dependence on $q$ in the leading order term. 
Combined with a novel sampling scheme, we give the fastest algorithms for approximating a large family of slow-growing kernels.", "bibtex": "@InProceedings{pmlr-v139-song21c,\n title = \t {Fast Sketching of Polynomial Kernels of Polynomial Degree},\n author = {Song, Zhao and Woodruff, David and Yu, Zheng and Zhang, Lichen},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9812--9823},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/song21c/song21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/song21c.html},\n abstract = \t {Kernel methods are fundamental in machine learning, and faster algorithms for kernel approximation provide direct speedups for many core tasks in machine learning. The polynomial kernel is especially important as other kernels can often be approximated by the polynomial kernel via a Taylor series expansion. Recent techniques in oblivious sketching reduce the dependence in the running time on the degree $q$ of the polynomial kernel from exponential to polynomial, which is useful for the Gaussian kernel, for which $q$ can be chosen to be polylogarithmic. However, for more slowly growing kernels, such as the neural tangent and arc cosine kernels, $q$ needs to be polynomial, and previous work incurs a polynomial factor slowdown in the running time. We give a new oblivious sketch which greatly improves upon this running time, by removing the dependence on $q$ in the leading order term. Combined with a novel sampling scheme, we give the fastest algorithms for approximating a large family of slow-growing kernels.}\n}", "pdf": "http://proceedings.mlr.press/v139/song21c/song21c.pdf", "supp": "", "pdf_size": 374539, "gs_citation": 51, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15731402114639682016&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Princeton University and Institute for Advanced Study; Carnegie Mellon University; Princeton University; Carnegie Mellon University", "aff_domain": "gmail.com;andrew.cmu.edu;princeton.edu;andrew.cmu.edu", "email": "gmail.com;andrew.cmu.edu;princeton.edu;andrew.cmu.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/song21c.html", "aff_unique_index": "0;1;0;1", "aff_unique_norm": "Princeton University;Carnegie Mellon University", "aff_unique_dep": ";", "aff_unique_url": "https://www.princeton.edu;https://www.cmu.edu", "aff_unique_abbr": "Princeton;CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Fast Stochastic Bregman Gradient Methods: Sharp Analysis and Variance Reduction", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9483", "id": "9483", "proceeding": "http://proceedings.mlr.press/v139/dragomir21a.html", "slides": "/media/icml-2021/Slides/9483.pdf", "author_site": "Radu Alexandru Dragomir, Mathieu Even, Hadrien Hendrikx", "author": "Radu Alexandru Dragomir; Mathieu Even; Hadrien Hendrikx", "abstract": "We study the problem of minimizing a relatively-smooth convex function using stochastic Bregman gradient methods. We first prove the convergence of Bregman Stochastic Gradient Descent (BSGD) to a region that depends on the noise (magnitude of the gradients) at the optimum. 
In particular, BSGD quickly converges to the exact minimizer when this noise is zero (interpolation setting, in which the data is fit perfectly). Otherwise, when the objective has a finite sum structure, we show that variance reduction can be used to counter the effect of noise. In particular, fast convergence to the exact minimizer can be obtained under additional regularity assumptions on the Bregman reference function. We illustrate the effectiveness of our approach on two key applications of relative smoothness: tomographic reconstruction with Poisson noise and statistical preconditioning for distributed optimization.", "bibtex": "@InProceedings{pmlr-v139-dragomir21a,\n title = \t {Fast Stochastic Bregman Gradient Methods: Sharp Analysis and Variance Reduction},\n author = {Dragomir, Radu Alexandru and Even, Mathieu and Hendrikx, Hadrien},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2815--2825},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/dragomir21a/dragomir21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/dragomir21a.html},\n abstract = \t {We study the problem of minimizing a relatively-smooth convex function using stochastic Bregman gradient methods. We first prove the convergence of Bregman Stochastic Gradient Descent (BSGD) to a region that depends on the noise (magnitude of the gradients) at the optimum. In particular, BSGD quickly converges to the exact minimizer when this noise is zero (interpolation setting, in which the data is fit perfectly). Otherwise, when the objective has a finite sum structure, we show that variance reduction can be used to counter the effect of noise. In particular, fast convergence to the exact minimizer can be obtained under additional regularity assumptions on the Bregman reference function. We illustrate the effectiveness of our approach on two key applications of relative smoothness: tomographic reconstruction with Poisson noise and statistical preconditioning for distributed optimization.}\n}", "pdf": "http://proceedings.mlr.press/v139/dragomir21a/dragomir21a.pdf", "supp": "", "pdf_size": 1458229, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9701160379351242712&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Universit\u00e9 Toulouse 1 Capitole + D.I. Ecole Normale Sup\u00e9rieure, CRNS, PSL University, Paris + INRIA Paris; D.I. Ecole Normale Sup\u00e9rieure, CRNS, PSL University, Paris + INRIA Paris; D.I. 
Ecole Normale Sup\u00e9rieure, CRNS, PSL University, Paris + INRIA Paris", "aff_domain": "inria.fr;ens.psl.eu;inria.fr", "email": "inria.fr;ens.psl.eu;inria.fr", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/dragomir21a.html", "aff_unique_index": "0+1+2;1+2;1+2", "aff_unique_norm": "Universit\u00e9 Toulouse 1 Capitole;Ecole Normale Sup\u00e9rieure;INRIA", "aff_unique_dep": ";;", "aff_unique_url": "https://www.univ-tlse1.fr;https://www.ens.fr;https://www.inria.fr", "aff_unique_abbr": "UT1;ENS;INRIA", "aff_campus_unique_index": "0+1+1;1+1;1+1", "aff_campus_unique": "Toulouse;Paris", "aff_country_unique_index": "0+0+0;0+0;0+0", "aff_country_unique": "France" }, { "title": "Fast active learning for pure exploration in reinforcement learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8541", "id": "8541", "proceeding": "http://proceedings.mlr.press/v139/menard21a.html", "slides": "", "author_site": "Pierre Menard, Omar Darwiche Domingues, Anders Jonsson, Emilie Kaufmann, Edouard Leurent, Michal Valko", "author": "Pierre Menard; Omar Darwiche Domingues; Anders Jonsson; Emilie Kaufmann; Edouard Leurent; Michal Valko", "abstract": "Realistic environments often provide agents with very limited feedback. When the environment is initially unknown, the feedback, in the beginning, can be completely absent, and the agents may first choose to devote all their effort on \\emph{exploring efficiently.} The exploration remains a challenge while it has been addressed with many hand-tuned heuristics with different levels of generality on one side, and a few theoretically-backed exploration strategies on the other. Many of them are incarnated by \\emph{intrinsic motivation} and in particular \\emph{explorations bonuses}. A common choice is to use $1/\\sqrt{n}$ bonus, where $n$ is a number of times this particular state-action pair was visited. We show that, surprisingly, for a pure-exploration objective of \\emph{reward-free exploration}, bonuses that scale with $1/n$ bring faster learning rates, improving the known upper bounds with respect to the dependence on the horizon $H$. Furthermore, we show that with an improved analysis of the stopping time, we can improve by a factor $H$ the sample complexity in the \\emph{best-policy identification} setting, which is another pure-exploration objective, where the environment provides rewards but the agent is not penalized for its behavior during the exploration phase.", "bibtex": "@InProceedings{pmlr-v139-menard21a,\n title = \t {Fast active learning for pure exploration in reinforcement learning},\n author = {Menard, Pierre and Domingues, Omar Darwiche and Jonsson, Anders and Kaufmann, Emilie and Leurent, Edouard and Valko, Michal},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7599--7608},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/menard21a/menard21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/menard21a.html},\n abstract = \t {Realistic environments often provide agents with very limited feedback. 
When the environment is initially unknown, the feedback, in the beginning, can be completely absent, and the agents may first choose to devote all their effort on \\emph{exploring efficiently.} The exploration remains a challenge while it has been addressed with many hand-tuned heuristics with different levels of generality on one side, and a few theoretically-backed exploration strategies on the other. Many of them are incarnated by \\emph{intrinsic motivation} and in particular \\emph{explorations bonuses}. A common choice is to use $1/\\sqrt{n}$ bonus, where $n$ is a number of times this particular state-action pair was visited. We show that, surprisingly, for a pure-exploration objective of \\emph{reward-free exploration}, bonuses that scale with $1/n$ bring faster learning rates, improving the known upper bounds with respect to the dependence on the horizon $H$. Furthermore, we show that with an improved analysis of the stopping time, we can improve by a factor $H$ the sample complexity in the \\emph{best-policy identification} setting, which is another pure-exploration objective, where the environment provides rewards but the agent is not penalized for its behavior during the exploration phase.}\n}", "pdf": "http://proceedings.mlr.press/v139/menard21a/menard21a.pdf", "supp": "", "pdf_size": 413863, "gs_citation": 97, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7639060689152294362&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Otto von Guericke University; Inria; Inria + Universit\u00e9 de Lille; Universitat Pompeu Fabra; Inria + DeepMind Paris; Inria + Universit\u00e9 de Lille + DeepMind Paris", "aff_domain": "ovgu.de;inria.fr; ; ;inria.fr;inria.fr", "email": "ovgu.de;inria.fr; ; ;inria.fr;inria.fr", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/menard21a.html", "aff_unique_index": "0;1;1+2;3;1+4;1+2+4", "aff_unique_norm": "Otto von Guericke University Magdeburg;INRIA;Universit\u00e9 de Lille;Universitat Pompeu Fabra;DeepMind", "aff_unique_dep": ";;;;", "aff_unique_url": "https://www.ovgu.de;https://www.inria.fr;https://www.univ-lille.fr;https://www.upf.edu/;https://deepmind.com", "aff_unique_abbr": "OVGU;Inria;UdeL;UPF;DeepMind", "aff_campus_unique_index": ";1;1", "aff_campus_unique": ";Paris", "aff_country_unique_index": "0;1;1+1;2;1+1;1+1+1", "aff_country_unique": "Germany;France;Spain" }, { "title": "Fast margin maximization via dual acceleration", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9251", "id": "9251", "proceeding": "http://proceedings.mlr.press/v139/ji21a.html", "slides": "", "author_site": "Ziwei Ji, Nati Srebro, Matus Telgarsky", "author": "Ziwei Ji; Nathan Srebro; Matus Telgarsky", "abstract": "We present and analyze a momentum-based gradient method for training linear classifiers with an exponentially-tailed loss (e.g., the exponential or logistic loss), which maximizes the classification margin on separable data at a rate of O(1/t^2). This contrasts with a rate of O(1/log(t)) for standard gradient descent, and O(1/t) for normalized gradient descent. The momentum-based method is derived via the convex dual of the maximum-margin problem, and specifically by applying Nesterov acceleration to this dual, which manages to result in a simple and intuitive method in the primal. 
This dual view can also be used to derive a stochastic variant, which performs adaptive non-uniform sampling via the dual variables.", "bibtex": "@InProceedings{pmlr-v139-ji21a,\n title = \t {Fast margin maximization via dual acceleration},\n author = {Ji, Ziwei and Srebro, Nathan and Telgarsky, Matus},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4860--4869},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ji21a/ji21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ji21a.html},\n abstract = \t {We present and analyze a momentum-based gradient method for training linear classifiers with an exponentially-tailed loss (e.g., the exponential or logistic loss), which maximizes the classification margin on separable data at a rate of O(1/t^2). This contrasts with a rate of O(1/log(t)) for standard gradient descent, and O(1/t) for normalized gradient descent. The momentum-based method is derived via the convex dual of the maximum-margin problem, and specifically by applying Nesterov acceleration to this dual, which manages to result in a simple and intuitive method in the primal. This dual view can also be used to derive a stochastic variant, which performs adaptive non-uniform sampling via the dual variables.}\n}", "pdf": "http://proceedings.mlr.press/v139/ji21a/ji21a.pdf", "supp": "", "pdf_size": 1288538, "gs_citation": 41, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3023024090001588966&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science, University of Illinois at Urbana-Champaign, Urbana, Illinois, USA+Toyota Technical Institute of Chicago, Chicago, Illinois, USA; Toyota Technical Institute of Chicago, Chicago, Illinois, USA; Department of Computer Science, University of Illinois at Urbana-Champaign, Urbana, Illinois, USA", "aff_domain": "illinois.edu; ; ", "email": "illinois.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/ji21a.html", "aff_unique_index": "0+1;1;0", "aff_unique_norm": "University of Illinois Urbana-Champaign;Toyota Technical Institute", "aff_unique_dep": "Department of Computer Science;", "aff_unique_url": "https://illinois.edu;https://www.tti-c.org", "aff_unique_abbr": "UIUC;TTI-C", "aff_campus_unique_index": "0+1;1;0", "aff_campus_unique": "Urbana;Chicago", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "United States" }, { "title": "Faster Kernel Matrix Algebra via Density Estimation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9737", "id": "9737", "proceeding": "http://proceedings.mlr.press/v139/backurs21a.html", "slides": "", "author_site": "Arturs Backurs, Piotr Indyk, Cameron Musco, Tal Wagner", "author": "Arturs Backurs; Piotr Indyk; Cameron Musco; Tal Wagner", "abstract": "We study fast algorithms for computing basic properties of an n x n positive semidefinite kernel matrix K corresponding to n points x_1,...,x_n in R^d. In particular, we consider the estimating the sum of kernel matrix entries, along with its top eigenvalue and eigenvector. These are some of the most basic problems defined over kernel matrices. 
We show that the sum of matrix entries can be estimated up to a multiplicative factor of 1+\\epsilon in time sublinear in n and linear in d for many popular kernel functions, including the Gaussian, exponential, and rational quadratic kernels. For these kernels, we also show that the top eigenvalue (and a witnessing approximate eigenvector) can be approximated to a multiplicative factor of 1+\\epsilon in time sub-quadratic in n and linear in d. Our algorithms represent significant advances in the best known runtimes for these problems. They leverage the positive definiteness of the kernel matrix, along with a recent line of work on efficient kernel density estimation.", "bibtex": "@InProceedings{pmlr-v139-backurs21a,\n title = \t {Faster Kernel Matrix Algebra via Density Estimation},\n author = {Backurs, Arturs and Indyk, Piotr and Musco, Cameron and Wagner, Tal},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {500--510},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/backurs21a/backurs21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/backurs21a.html},\n abstract = \t {We study fast algorithms for computing basic properties of an n x n positive semidefinite kernel matrix K corresponding to n points x_1,...,x_n in R^d. In particular, we consider the estimating the sum of kernel matrix entries, along with its top eigenvalue and eigenvector. These are some of the most basic problems defined over kernel matrices. We show that the sum of matrix entries can be estimated up to a multiplicative factor of 1+\\epsilon in time sublinear in n and linear in d for many popular kernel functions, including the Gaussian, exponential, and rational quadratic kernels. For these kernels, we also show that the top eigenvalue (and a witnessing approximate eigenvector) can be approximated to a multiplicative factor of 1+\\epsilon in time sub-quadratic in n and linear in d. Our algorithms represent significant advances in the best known runtimes for these problems. 
They leverage the positive definiteness of the kernel matrix, along with a recent line of work on efficient kernel density estimation.}\n}", "pdf": "http://proceedings.mlr.press/v139/backurs21a/backurs21a.pdf", "supp": "", "pdf_size": 423080, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9646093563169287141&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Toyota Technological Institute at Chicago, Chicago, IL, USA; Massachusetts Institute of Technology, Cambridge, MA, USA; University of Massachusetts Amherst, Amherst, MA, USA; Microsoft Research Redmond, Redmond, WA, USA", "aff_domain": "ttic.edu;mit.edu;umass.edu;mit.edu", "email": "ttic.edu;mit.edu;umass.edu;mit.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/backurs21a.html", "aff_unique_index": "0;1;2;3", "aff_unique_norm": "Toyota Technological Institute at Chicago;Massachusetts Institute of Technology;University of Massachusetts Amherst;Microsoft", "aff_unique_dep": ";;;Microsoft Research", "aff_unique_url": "https://www.tti-chicago.org;https://www.mit.edu;https://www.umass.edu;https://www.microsoft.com/en-us/research/group/microsoft-research-redmond", "aff_unique_abbr": "TTI Chicago;MIT;UMass Amherst;MSR", "aff_campus_unique_index": "0;1;2;3", "aff_campus_unique": "Chicago;Cambridge;Amherst;Redmond", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Feature Clustering for Support Identification in Extreme Regions", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10013", "id": "10013", "proceeding": "http://proceedings.mlr.press/v139/jalalzai21a.html", "slides": "", "author_site": "Hamid Jalalzai, R\u00e9mi Leluc", "author": "Hamid Jalalzai; R\u00e9mi Leluc", "abstract": "Understanding the complex structure of multivariate extremes is a major challenge in various fields from portfolio monitoring and environmental risk management to insurance. In the framework of multivariate Extreme Value Theory, a common characterization of extremes\u2019 dependence structure is the angular measure. It is a suitable measure to work in extreme regions as it provides meaningful insights concerning the subregions where extremes tend to concentrate their mass. The present paper develops a novel optimization-based approach to assess the dependence structure of extremes. This support identification scheme rewrites as estimating clusters of features which best capture the support of extremes. The dimension reduction technique we provide is applied to statistical learning tasks such as feature clustering and anomaly detection. 
Numerical experiments provide strong empirical evidence of the relevance of our approach.", "bibtex": "@InProceedings{pmlr-v139-jalalzai21a,\n title = \t {Feature Clustering for Support Identification in Extreme Regions},\n author = {Jalalzai, Hamid and Leluc, R{\\'e}mi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4733--4743},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jalalzai21a/jalalzai21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jalalzai21a.html},\n abstract = \t {Understanding the complex structure of multivariate extremes is a major challenge in various fields from portfolio monitoring and environmental risk management to insurance. In the framework of multivariate Extreme Value Theory, a common characterization of extremes\u2019 dependence structure is the angular measure. It is a suitable measure to work in extreme regions as it provides meaningful insights concerning the subregions where extremes tend to concentrate their mass. The present paper develops a novel optimization-based approach to assess the dependence structure of extremes. This support identification scheme rewrites as estimating clusters of features which best capture the support of extremes. The dimension reduction technique we provide is applied to statistical learning tasks such as feature clustering and anomaly detection. Numerical experiments provide strong empirical evidence of the relevance of our approach.}\n}", "pdf": "http://proceedings.mlr.press/v139/jalalzai21a/jalalzai21a.pdf", "supp": "", "pdf_size": 691181, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17566789488024407466&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "T\u00e9l\u00e9com Paris, Institut Polytechnique de Paris, France+INRIA, Institut Polytechnique de Paris, France; T\u00e9l\u00e9com Paris, Institut Polytechnique de Paris, France", "aff_domain": "inria.fr;telecom-paris.fr", "email": "inria.fr;telecom-paris.fr", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/jalalzai21a.html", "aff_unique_index": "0+1;0", "aff_unique_norm": "T\u00e9l\u00e9com Paris;INRIA", "aff_unique_dep": ";", "aff_unique_url": "https://www.telecom-paris.fr;https://www.inria.fr", "aff_unique_abbr": "T\u00e9l\u00e9com Paris;INRIA", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0", "aff_country_unique": "France" }, { "title": "Federated Composite Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10485", "id": "10485", "proceeding": "http://proceedings.mlr.press/v139/yuan21d.html", "slides": "/media/icml-2021/Slides/10485.pdf", "author_site": "Honglin Yuan, Manzil Zaheer, Sashank Jakkam Reddi", "author": "Honglin Yuan; Manzil Zaheer; Sashank Reddi", "abstract": "Federated Learning (FL) is a distributed learning paradigm that scales on-device learning collaboratively and privately. Standard FL algorithms such as FEDAVG are primarily geared towards smooth unconstrained settings. In this paper, we study the Federated Composite Optimization (FCO) problem, in which the loss function contains a non-smooth regularizer.
Such problems arise naturally in FL applications that involve sparsity, low-rank, monotonicity, or more general constraints. We first show that straightforward extensions of primal algorithms such as FedAvg are not well-suited for FCO since they suffer from the \"curse of primal averaging,\" resulting in poor convergence. As a solution, we propose a new primal-dual algorithm, Federated Dual Averaging (FedDualAvg), which by employing a novel server dual averaging procedure circumvents the curse of primal averaging. Our theoretical analysis and empirical experiments demonstrate that FedDualAvg outperforms the other baselines.", "bibtex": "@InProceedings{pmlr-v139-yuan21d,\n title = \t {Federated Composite Optimization},\n author = {Yuan, Honglin and Zaheer, Manzil and Reddi, Sashank},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12253--12266},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yuan21d/yuan21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/yuan21d.html},\n abstract = \t {Federated Learning (FL) is a distributed learning paradigm that scales on-device learning collaboratively and privately. Standard FL algorithms such as FEDAVG are primarily geared towards smooth unconstrained settings. In this paper, we study the Federated Composite Optimization (FCO) problem, in which the loss function contains a non-smooth regularizer. Such problems arise naturally in FL applications that involve sparsity, low-rank, monotonicity, or more general constraints. We first show that straightforward extensions of primal algorithms such as FedAvg are not well-suited for FCO since they suffer from the \"curse of primal averaging,\" resulting in poor convergence. As a solution, we propose a new primal-dual algorithm, Federated Dual Averaging (FedDualAvg), which by employing a novel server dual averaging procedure circumvents the curse of primal averaging. 
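As a rough illustration of the "curse of primal averaging" mentioned above, the hypothetical NumPy snippet below contrasts averaging already-sparsified client models (which produces many small nonzero entries) with averaging the clients' gradient accumulators first and applying a single soft-thresholding step. The step size, l1 strength, and toy gradients are assumptions; this is only a sketch of the intuition, not the paper's FedDualAvg procedure.

```python
# Toy, hypothetical contrast: averaging sparse primal models vs. averaging
# gradient ("dual") accumulators before one soft-thresholding (l1 prox) step.
# Illustration of the intuition only; NOT the actual FedDualAvg algorithm.
import numpy as np

lam, eta, d = 0.5, 1.0, 10   # made-up l1 strength, step size, dimension

def soft_threshold(v, tau):
    return np.sign(v) * np.maximum(np.abs(v) - tau, 0.0)

# Two clients whose (weak) gradients point at disjoint coordinates.
grad1 = np.zeros(d); grad1[:3] = -0.8
grad2 = np.zeros(d); grad2[7:] = -0.8

# Primal averaging: each client applies its own prox step, server averages models.
model1 = soft_threshold(-eta * grad1, eta * lam)
model2 = soft_threshold(-eta * grad2, eta * lam)
primal_avg = 0.5 * (model1 + model2)

# Dual-averaging style: average the accumulated gradients, then one prox step.
dual_avg_model = soft_threshold(-eta * 0.5 * (grad1 + grad2), eta * lam)

print("nonzeros after primal averaging:", np.count_nonzero(primal_avg))   # dense-ish, small entries
print("nonzeros after dual averaging:  ", np.count_nonzero(dual_avg_model))  # stays sparse
```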
Our theoretical analysis and empirical experiments demonstrate that FedDualAvg outperforms the other baselines.}\n}", "pdf": "http://proceedings.mlr.press/v139/yuan21d/yuan21d.pdf", "supp": "", "pdf_size": 1113893, "gs_citation": 75, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10805982907996173478&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Stanford University + Google Research; Google Research; Google Research", "aff_domain": "stanford.edu; ; ", "email": "stanford.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/yuan21d.html", "aff_unique_index": "0+1;1;1", "aff_unique_norm": "Stanford University;Google", "aff_unique_dep": ";Google Research", "aff_unique_url": "https://www.stanford.edu;https://research.google", "aff_unique_abbr": "Stanford;Google Research", "aff_campus_unique_index": "0+1;1;1", "aff_campus_unique": "Stanford;Mountain View", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "United States" }, { "title": "Federated Continual Learning with Weighted Inter-client Transfer", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9849", "id": "9849", "proceeding": "http://proceedings.mlr.press/v139/yoon21b.html", "slides": "", "author_site": "Jaehong Yoon, Wonyong Jeong, GiWoong Lee, Eunho Yang, Sung Ju Hwang", "author": "Jaehong Yoon; Wonyong Jeong; Giwoong Lee; Eunho Yang; Sung Ju Hwang", "abstract": "There has been a surge of interest in continual learning and federated learning, both of which are important in deep neural networks in real-world scenarios. Yet little research has been done regarding the scenario where each client learns on a sequence of tasks from a private local data stream. This problem of federated continual learning poses new challenges to continual learning, such as utilizing knowledge from other clients, while preventing interference from irrelevant knowledge. To resolve these issues, we propose a novel federated continual learning framework, Federated Weighted Inter-client Transfer (FedWeIT), which decomposes the network weights into global federated parameters and sparse task-specific parameters, and each client receives selective knowledge from other clients by taking a weighted combination of their task-specific parameters. FedWeIT minimizes interference between incompatible tasks, and also allows positive knowledge transfer across clients during learning. We validate our FedWeIT against existing federated learning and continual learning methods under varying degrees of task similarity across clients, and our model significantly outperforms them with a large reduction in the communication cost.", "bibtex": "@InProceedings{pmlr-v139-yoon21b,\n title = \t {Federated Continual Learning with Weighted Inter-client Transfer},\n author = {Yoon, Jaehong and Jeong, Wonyong and Lee, Giwoong and Yang, Eunho and Hwang, Sung Ju},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12073--12086},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yoon21b/yoon21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/yoon21b.html},\n abstract = \t {There has been a surge of interest in continual learning and federated learning, both of which are important in deep neural networks in real-world scenarios. 
Yet little research has been done regarding the scenario where each client learns on a sequence of tasks from a private local data stream. This problem of federated continual learning poses new challenges to continual learning, such as utilizing knowledge from other clients, while preventing interference from irrelevant knowledge. To resolve these issues, we propose a novel federated continual learning framework, Federated Weighted Inter-client Transfer (FedWeIT), which decomposes the network weights into global federated parameters and sparse task-specific parameters, and each client receives selective knowledge from other clients by taking a weighted combination of their task-specific parameters. FedWeIT minimizes interference between incompatible tasks, and also allows positive knowledge transfer across clients during learning. We validate our FedWeIT against existing federated learning and continual learning methods under varying degrees of task similarity across clients, and our model significantly outperforms them with a large reduction in the communication cost.}\n}", "pdf": "http://proceedings.mlr.press/v139/yoon21b/yoon21b.pdf", "supp": "", "pdf_size": 5286153, "gs_citation": 262, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6346174361267860505&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Korea Advanced Institute of Science and Technology (KAIST), South Korea+AITRICS, South Korea; Korea Advanced Institute of Science and Technology (KAIST), South Korea+AITRICS, South Korea; Korea Advanced Institute of Science and Technology (KAIST), South Korea; Korea Advanced Institute of Science and Technology (KAIST), South Korea+AITRICS, South Korea; Korea Advanced Institute of Science and Technology (KAIST), South Korea+AITRICS, South Korea", "aff_domain": "kaist.ac.kr;kaist.ac.kr; ; ; ", "email": "kaist.ac.kr;kaist.ac.kr; ; ; ", "github": "https://github.com/wyjeong/FedWeIT", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/yoon21b.html", "aff_unique_index": "0+1;0+1;0;0+1;0+1", "aff_unique_norm": "Korea Advanced Institute of Science and Technology;AITRICS", "aff_unique_dep": ";", "aff_unique_url": "https://www.kaist.ac.kr;", "aff_unique_abbr": "KAIST;", "aff_campus_unique_index": ";;;", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0+0;0;0+0;0+0", "aff_country_unique": "South Korea" }, { "title": "Federated Deep AUC Maximization for Hetergeneous Data with a Constant Communication Complexity", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9645", "id": "9645", "proceeding": "http://proceedings.mlr.press/v139/yuan21a.html", "slides": "", "author_site": "Zhuoning Yuan, Zhishuai Guo, Yi Xu, Yiming Ying, Tianbao Yang", "author": "Zhuoning Yuan; Zhishuai Guo; Yi Xu; Yiming Ying; Tianbao Yang", "abstract": "Deep AUC (area under the ROC curve) Maximization (DAM) has attracted much attention recently due to its great potential for imbalanced data classification. However, the research on Federated Deep AUC Maximization (FDAM) is still limited. Compared with standard federated learning (FL) approaches that focus on decomposable minimization objectives, FDAM is more complicated due to its minimization objective is non-decomposable over individual examples. 
In this paper, we propose improved FDAM algorithms for heterogeneous data by solving the popular non-convex strongly-concave min-max formulation of DAM in a distributed fashion, which can also be applied to a class of non-convex strongly-concave min-max problems. A striking result of this paper is that the communication complexity of the proposed algorithm is a constant independent of the number of machines and also independent of the accuracy level, which improves an existing result by orders of magnitude. The experiments have demonstrated the effectiveness of our FDAM algorithm on benchmark datasets, and on medical chest X-ray images from different organizations. Our experiment shows that the performance of FDAM using data from multiple hospitals can improve the AUC score on testing data from a single hospital for detecting life-threatening diseases based on chest radiographs.", "bibtex": "@InProceedings{pmlr-v139-yuan21a,\n title = \t {Federated Deep AUC Maximization for Hetergeneous Data with a Constant Communication Complexity},\n author = {Yuan, Zhuoning and Guo, Zhishuai and Xu, Yi and Ying, Yiming and Yang, Tianbao},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12219--12229},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yuan21a/yuan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yuan21a.html},\n abstract = \t {Deep AUC (area under the ROC curve) Maximization (DAM) has attracted much attention recently due to its great potential for imbalanced data classification. However, the research on Federated Deep AUC Maximization (FDAM) is still limited. Compared with standard federated learning (FL) approaches that focus on decomposable minimization objectives, FDAM is more complicated due to its minimization objective is non-decomposable over individual examples. In this paper, we propose improved FDAM algorithms for heterogeneous data by solving the popular non-convex strongly-concave min-max formulation of DAM in a distributed fashion, which can also be applied to a class of non-convex strongly-concave min-max problems. A striking result of this paper is that the communication complexity of the proposed algorithm is a constant independent of the number of machines and also independent of the accuracy level, which improves an existing result by orders of magnitude. The experiments have demonstrated the effectiveness of our FDAM algorithm on benchmark datasets, and on medical chest X-ray images from different organizations. 
Our experiment shows that the performance of FDAM using data from multiple hospitals can improve the AUC score on testing data from a single hospital for detecting life-threatening diseases based on chest radiographs.}\n}", "pdf": "http://proceedings.mlr.press/v139/yuan21a/yuan21a.pdf", "supp": "", "pdf_size": 995136, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5180855486571428454&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science, University of Iowa; Department of Computer Science, University of Iowa; Machine Intelligence Technology, Alibaba Group; Department of Mathematics and Statistics, State University of New York at Albany; Department of Computer Science, University of Iowa", "aff_domain": "uiowa.edu; ; ; ;uiowa.edu", "email": "uiowa.edu; ; ; ;uiowa.edu", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/yuan21a.html", "aff_unique_index": "0;0;1;2;0", "aff_unique_norm": "University of Iowa;Alibaba Group;State University of New York at Albany", "aff_unique_dep": "Department of Computer Science;Machine Intelligence Technology;Department of Mathematics and Statistics", "aff_unique_url": "https://www.uiowa.edu;https://www.alibaba.com;https://www.albany.edu", "aff_unique_abbr": "UIowa;Alibaba;SUNY Albany", "aff_campus_unique_index": "1", "aff_campus_unique": ";Albany", "aff_country_unique_index": "0;0;1;0;0", "aff_country_unique": "United States;China" }, { "title": "Federated Learning of User Verification Models Without Sharing Embeddings", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8753", "id": "8753", "proceeding": "http://proceedings.mlr.press/v139/hosseini21a.html", "slides": "/media/icml-2021/Slides/8753.pdf", "author_site": "Hossein Hosseini, Hyunsin Park, Sungrack Yun, Christos Louizos, Joseph B Soriaga, Max Welling", "author": "Hossein Hosseini; Hyunsin Park; Sungrack Yun; Christos Louizos; Joseph Soriaga; Max Welling", "abstract": "We consider the problem of training User Verification (UV) models in federated setup, where each user has access to the data of only one class and user embeddings cannot be shared with the server or other users. To address this problem, we propose Federated User Verification (FedUV), a framework in which users jointly learn a set of vectors and maximize the correlation of their instance embeddings with a secret linear combination of those vectors. We show that choosing the linear combinations from the codewords of an error-correcting code allows users to collaboratively train the model without revealing their embedding vectors. 
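The following hypothetical NumPy sketch illustrates the general idea stated above: each user is assigned a codeword, the codeword selects a secret linear combination of shared vectors, and an instance embedding is scored by its correlation with that combination. The dimensions, names, and Hadamard-style codewords are assumptions for illustration; this is not the paper's FedUV training procedure.

```python
# Hypothetical sketch: score an embedding by its correlation with a secret
# linear combination of shared vectors, selected by a per-user codeword
# (rows of a small Hadamard matrix, a simple code with large pairwise distance).
# Illustration only; NOT the actual FedUV method.
import numpy as np

rng = np.random.default_rng(0)
emb_dim, num_vectors = 16, 8

# Shared vectors jointly learned by all users (random stand-ins here).
shared_vectors = rng.normal(size=(num_vectors, emb_dim))

# Sylvester construction of an 8 x 8 Hadamard matrix; each row is a codeword.
H = np.array([[1.0]])
while H.shape[0] < num_vectors:
    H = np.block([[H, H], [H, -H]])

def user_target(user_id):
    """Secret linear combination of the shared vectors selected by the user's codeword."""
    return H[user_id] @ shared_vectors

def correlation_score(embedding, user_id):
    """Cosine similarity between an instance embedding and the user's secret target."""
    t = user_target(user_id)
    return float(embedding @ t / (np.linalg.norm(embedding) * np.linalg.norm(t)))

embedding = rng.normal(size=emb_dim)   # stand-in for a model's instance embedding
print(correlation_score(embedding, user_id=3))
```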
We present the experimental results for user verification with voice, face, and handwriting data and show that FedUV is on par with existing approaches, while not sharing the embeddings with other users or the server.", "bibtex": "@InProceedings{pmlr-v139-hosseini21a,\n title = \t {Federated Learning of User Verification Models Without Sharing Embeddings},\n author = {Hosseini, Hossein and Park, Hyunsin and Yun, Sungrack and Louizos, Christos and Soriaga, Joseph and Welling, Max},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4328--4336},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hosseini21a/hosseini21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hosseini21a.html},\n abstract = \t {We consider the problem of training User Verification (UV) models in federated setup, where each user has access to the data of only one class and user embeddings cannot be shared with the server or other users. To address this problem, we propose Federated User Verification (FedUV), a framework in which users jointly learn a set of vectors and maximize the correlation of their instance embeddings with a secret linear combination of those vectors. We show that choosing the linear combinations from the codewords of an error-correcting code allows users to collaboratively train the model without revealing their embedding vectors. We present the experimental results for user verification with voice, face, and handwriting data and show that FedUV is on par with existing approaches, while not sharing the embeddings with other users or the server.}\n}", "pdf": "http://proceedings.mlr.press/v139/hosseini21a/hosseini21a.pdf", "supp": "", "pdf_size": 4404805, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9412192735455482132&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Qualcomm AI Research; Qualcomm AI Research; Qualcomm AI Research; Qualcomm AI Research; Qualcomm AI Research; Qualcomm AI Research", "aff_domain": "qti.qualcomm.com; ; ; ; ; ", "email": "qti.qualcomm.com; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/hosseini21a.html", "aff_unique_index": "0;0;0;0;0;0", "aff_unique_norm": "Qualcomm", "aff_unique_dep": "Qualcomm AI Research", "aff_unique_url": "https://www.qualcomm.com/research", "aff_unique_abbr": "QAI", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Federated Learning under Arbitrary Communication Patterns", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9203", "id": "9203", "proceeding": "http://proceedings.mlr.press/v139/avdiukhin21a.html", "slides": "", "author_site": "Dmitrii Avdiukhin, Shiva Kasiviswanathan", "author": "Dmitrii Avdiukhin; Shiva Kasiviswanathan", "abstract": "Federated Learning is a distributed learning setting where the goal is to train a centralized model with training data distributed over a large number of heterogeneous clients, each with unreliable and relatively slow network connections. 
A common optimization approach used in federated learning is based on the idea of local SGD: each client runs some number of SGD steps locally and then the updated local models are averaged to form the updated global model on the coordinating server. In this paper, we investigate the performance of an asynchronous version of local SGD wherein the clients can communicate with the server at arbitrary time intervals. Our main result shows that for smooth strongly convex and smooth nonconvex functions we achieve convergence rates that match the synchronous version that requires all clients to communicate simultaneously.", "bibtex": "@InProceedings{pmlr-v139-avdiukhin21a,\n title = \t {Federated Learning under Arbitrary Communication Patterns},\n author = {Avdiukhin, Dmitrii and Kasiviswanathan, Shiva},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {425--435},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/avdiukhin21a/avdiukhin21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/avdiukhin21a.html},\n abstract = \t {Federated Learning is a distributed learning setting where the goal is to train a centralized model with training data distributed over a large number of heterogeneous clients, each with unreliable and relatively slow network connections. A common optimization approach used in federated learning is based on the idea of local SGD: each client runs some number of SGD steps locally and then the updated local models are averaged to form the updated global model on the coordinating server. In this paper, we investigate the performance of an asynchronous version of local SGD wherein the clients can communicate with the server at arbitrary time intervals. 
Our main result shows that for smooth strongly convex and smooth nonconvex functions we achieve convergence rates that match the synchronous version that requires all clients to communicate simultaneously.}\n}", "pdf": "http://proceedings.mlr.press/v139/avdiukhin21a/avdiukhin21a.pdf", "supp": "", "pdf_size": 1623606, "gs_citation": 84, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6047701639744163311&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science, Indiana University, Bloomington, IN, USA; Amazon, Palo Alto, CA, USA", "aff_domain": "iu.edu;gmail.com", "email": "iu.edu;gmail.com", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/avdiukhin21a.html", "aff_unique_index": "0;1", "aff_unique_norm": "Indiana University;Amazon", "aff_unique_dep": "Department of Computer Science;Amazon", "aff_unique_url": "https://www.indiana.edu;https://www.amazon.com", "aff_unique_abbr": "IU;Amazon", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Bloomington;Palo Alto", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Few-Shot Conformal Prediction with Auxiliary Tasks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8739", "id": "8739", "proceeding": "http://proceedings.mlr.press/v139/fisch21a.html", "slides": "/media/icml-2021/Slides/8739.pdf", "author_site": "Adam Fisch, Tal Schuster, Tommi Jaakkola, Regina Barzilay", "author": "Adam Fisch; Tal Schuster; Tommi Jaakkola; Dr.Regina Barzilay", "abstract": "We develop a novel approach to conformal prediction when the target task has limited data available for training. Conformal prediction identifies a small set of promising output candidates in place of a single prediction, with guarantees that the set contains the correct answer with high probability. When training data is limited, however, the predicted set can easily become unusably large. In this work, we obtain substantially tighter prediction sets while maintaining desirable marginal guarantees by casting conformal prediction as a meta-learning paradigm over exchangeable collections of auxiliary tasks. Our conformalization algorithm is simple, fast, and agnostic to the choice of underlying model, learning algorithm, or dataset. We demonstrate the effectiveness of this approach across a number of few-shot classification and regression tasks in natural language processing, computer vision, and computational chemistry for drug discovery.", "bibtex": "@InProceedings{pmlr-v139-fisch21a,\n title = \t {Few-Shot Conformal Prediction with Auxiliary Tasks},\n author = {Fisch, Adam and Schuster, Tal and Jaakkola, Tommi and Barzilay, Dr.Regina},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3329--3339},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fisch21a/fisch21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/fisch21a.html},\n abstract = \t {We develop a novel approach to conformal prediction when the target task has limited data available for training. Conformal prediction identifies a small set of promising output candidates in place of a single prediction, with guarantees that the set contains the correct answer with high probability. 
When training data is limited, however, the predicted set can easily become unusably large. In this work, we obtain substantially tighter prediction sets while maintaining desirable marginal guarantees by casting conformal prediction as a meta-learning paradigm over exchangeable collections of auxiliary tasks. Our conformalization algorithm is simple, fast, and agnostic to the choice of underlying model, learning algorithm, or dataset. We demonstrate the effectiveness of this approach across a number of few-shot classification and regression tasks in natural language processing, computer vision, and computational chemistry for drug discovery.}\n}", "pdf": "http://proceedings.mlr.press/v139/fisch21a/fisch21a.pdf", "supp": "", "pdf_size": 1840316, "gs_citation": 59, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10162141541577160393&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Computer Science and Artificial Intelligence Laboratory, Massachusetts Institute of Technology, Cambridge, MA, USA; Computer Science and Artificial Intelligence Laboratory, Massachusetts Institute of Technology, Cambridge, MA, USA; Computer Science and Artificial Intelligence Laboratory, Massachusetts Institute of Technology, Cambridge, MA, USA; Computer Science and Artificial Intelligence Laboratory, Massachusetts Institute of Technology, Cambridge, MA, USA", "aff_domain": "csail.mit.edu; ; ; ", "email": "csail.mit.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/fisch21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory", "aff_unique_url": "https://www.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Few-Shot Neural Architecture Search", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8891", "id": "8891", "proceeding": "http://proceedings.mlr.press/v139/zhao21d.html", "slides": "", "author_site": "Yiyang Zhao, Linnan Wang, Yuandong Tian, Rodrigo Fonseca, Tian Guo", "author": "Yiyang Zhao; Linnan Wang; Yuandong Tian; Rodrigo Fonseca; Tian Guo", "abstract": "Efficient evaluation of a network architecture drawn from a large search space remains a key challenge in Neural Architecture Search (NAS). Vanilla NAS evaluates each architecture by training from scratch, which gives the true performance but is extremely time-consuming. Recently, one-shot NAS substantially reduces the computation cost by training only one supernetwork, a.k.a. supernet, to approximate the performance of every architecture in the search space via weight-sharing. However, the performance estimation can be very inaccurate due to the co-adaption among operations. In this paper, we propose few-shot NAS that uses multiple supernetworks, called sub-supernet, each covering different regions of the search space to alleviate the undesired co-adaption. Compared to one-shot NAS, few-shot NAS improves the accuracy of architecture evaluation with a small increase of evaluation cost. With only up to 7 sub-supernets, few-shot NAS establishes new SoTAs: on ImageNet, it finds models that reach 80.5% top-1 accuracy at 600 MB FLOPS and 77.5% top-1 accuracy at 238 MFLOPS; on CIFAR10, it reaches 98.72% top-1 accuracy without using extra data or transfer learning. 
In Auto-GAN, few-shot NAS outperforms the previously published results by up to 20%. Extensive experiments show that few-shot NAS significantly improves various one-shot methods, including 4 gradient-based and 6 search-based methods on 3 different tasks in NasBench-201 and NasBench1-shot-1.", "bibtex": "@InProceedings{pmlr-v139-zhao21d,\n title = \t {Few-Shot Neural Architecture Search},\n author = {Zhao, Yiyang and Wang, Linnan and Tian, Yuandong and Fonseca, Rodrigo and Guo, Tian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12707--12718},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhao21d/zhao21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhao21d.html},\n abstract = \t {Efficient evaluation of a network architecture drawn from a large search space remains a key challenge in Neural Architecture Search (NAS). Vanilla NAS evaluates each architecture by training from scratch, which gives the true performance but is extremely time-consuming. Recently, one-shot NAS substantially reduces the computation cost by training only one supernetwork, a.k.a. supernet, to approximate the performance of every architecture in the search space via weight-sharing. However, the performance estimation can be very inaccurate due to the co-adaption among operations. In this paper, we propose few-shot NAS that uses multiple supernetworks, called sub-supernet, each covering different regions of the search space to alleviate the undesired co-adaption. Compared to one-shot NAS, few-shot NAS improves the accuracy of architecture evaluation with a small increase of evaluation cost. With only up to 7 sub-supernets, few-shot NAS establishes new SoTAs: on ImageNet, it finds models that reach 80.5% top-1 accuracy at 600 MB FLOPS and 77.5% top-1 accuracy at 238 MFLOPS; on CIFAR10, it reaches 98.72% top-1 accuracy without using extra data or transfer learning. In Auto-GAN, few-shot NAS outperforms the previously published results by up to 20%. 
Extensive experiments show that few-shot NAS significantly improves various one-shot methods, including 4 gradient-based and 6 search-based methods on 3 different tasks in NasBench-201 and NasBench1-shot-1.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhao21d/zhao21d.pdf", "supp": "", "pdf_size": 1350057, "gs_citation": 138, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=668653762741709836&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Worcester Polytechnic Institute; Brown University; Facebook AI Research; Brown University; Worcester Polytechnic Institute", "aff_domain": "wpi.edu; ; ; ; ", "email": "wpi.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/zhao21d.html", "aff_unique_index": "0;1;2;1;0", "aff_unique_norm": "Worcester Polytechnic Institute;Brown University;Meta", "aff_unique_dep": ";;Facebook AI Research", "aff_unique_url": "https://www.wpi.edu;https://www.brown.edu;https://research.facebook.com", "aff_unique_abbr": "WPI;Brown;FAIR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Few-shot Language Coordination by Modeling Theory of Mind", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9765", "id": "9765", "proceeding": "http://proceedings.mlr.press/v139/zhu21d.html", "slides": "/media/icml-2021/Slides/9765.pdf", "author_site": "Hao Zhu, Graham Neubig, Yonatan Bisk", "author": "Hao Zhu; Graham Neubig; Yonatan Bisk", "abstract": "No man is an island. Humans develop the ability to communicate with a large community by coordinating with different interlocutors within short conversations. This ability is largely understudied by the research on building neural language communicative agents. We study the task of few-shot language coordination: agents quickly adapting to their conversational partners\u2019 language abilities. Different from current communicative agents trained with self-play, we investigate this more general paradigm by requiring the lead agent to coordinate with a population of agents each of whom has different linguistic abilities. This leads to a general agent able to quickly adapt to communicating with unseen agents in the population. Unlike prior work, success here requires the ability to model the partner\u2019s beliefs, a vital component of human communication. Drawing inspiration from the study of theory-of-mind (ToM; Premack & Woodruff (1978)), we study the effect of the speaker explicitly modeling the listener\u2019s mental state. Learning by communicating with a population, the speakers, as shown in our experiments, acquire the ability to learn to predict the reactions of their partner upon various messages on-the-fly. The speaker\u2019s predictions for the future actions help it generate the best instructions in order to maximize communicative goal with message costs. To examine our hypothesis that the instructions generated with ToM modeling yield better communication performance, we employ our agents in both a referential game and a language navigation task. 
Positive results from our experiments also hint at the importance of explicitly modeling language acquisition as a socio-pragmatic progress.", "bibtex": "@InProceedings{pmlr-v139-zhu21d,\n title = \t {Few-shot Language Coordination by Modeling Theory of Mind},\n author = {Zhu, Hao and Neubig, Graham and Bisk, Yonatan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12901--12911},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhu21d/zhu21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhu21d.html},\n abstract = \t {No man is an island. Humans develop the ability to communicate with a large community by coordinating with different interlocutors within short conversations. This ability is largely understudied by the research on building neural language communicative agents. We study the task of few-shot language coordination: agents quickly adapting to their conversational partners\u2019 language abilities. Different from current communicative agents trained with self-play, we investigate this more general paradigm by requiring the lead agent to coordinate with a population of agents each of whom has different linguistic abilities. This leads to a general agent able to quickly adapt to communicating with unseen agents in the population. Unlike prior work, success here requires the ability to model the partner\u2019s beliefs, a vital component of human communication. Drawing inspiration from the study of theory-of-mind (ToM; Premack & Woodruff (1978)), we study the effect of the speaker explicitly modeling the listener\u2019s mental state. Learning by communicating with a population, the speakers, as shown in our experiments, acquire the ability to learn to predict the reactions of their partner upon various messages on-the-fly. The speaker\u2019s predictions for the future actions help it generate the best instructions in order to maximize communicative goal with message costs. To examine our hypothesis that the instructions generated with ToM modeling yield better communication performance, we employ our agents in both a referential game and a language navigation task. 
Positive results from our experiments also hint at the importance of explicitly modeling language acquisition as a socio-pragmatic progress.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhu21d/zhu21d.pdf", "supp": "", "pdf_size": 635099, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12854818907467406853&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University", "aff_domain": "cmu.edu; ; ", "email": "cmu.edu; ; ", "github": "https://github.com/CLAW-Lab/ToM", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zhu21d.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "Language Technologies Institute", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Finding Relevant Information via a Discrete Fourier Expansion", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10135", "id": "10135", "proceeding": "http://proceedings.mlr.press/v139/heidari21a.html", "slides": "/media/icml-2021/Slides/10135.pdf", "author_site": "Mohsen Heidari, Jithin Sreedharan, Gil Shamir, Wojciech Szpankowski", "author": "Mohsen Heidari; Jithin Sreedharan; Gil I Shamir; Wojciech Szpankowski", "abstract": "A fundamental obstacle in learning information from data is the presence of nonlinear redundancies and dependencies in it. To address this, we propose a Fourier-based approach to extract relevant information in the supervised setting. We first develop a novel Fourier expansion for functions of correlated binary random variables. This expansion is a generalization of the standard Fourier analysis on the Boolean cube beyond product probability spaces. We further extend our Fourier analysis to stochastic mappings. As an important application of this analysis, we investigate learning with feature subset selection. We reformulate this problem in the Fourier domain and introduce a computationally efficient measure for selecting features. Bridging the Bayesian error rate with the Fourier coefficients, we demonstrate that the Fourier expansion provides a powerful tool to characterize nonlinear dependencies in the features-label relation. Via theoretical analysis, we show that our proposed measure finds provably asymptotically optimal feature subsets. 
Lastly, we present an algorithm based on our measure and verify our findings via numerical experiments on various datasets.", "bibtex": "@InProceedings{pmlr-v139-heidari21a,\n title = \t {Finding Relevant Information via a Discrete Fourier Expansion},\n author = {Heidari, Mohsen and Sreedharan, Jithin and Shamir, Gil I and Szpankowski, Wojciech},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4181--4191},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/heidari21a/heidari21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/heidari21a.html},\n abstract = \t {A fundamental obstacle in learning information from data is the presence of nonlinear redundancies and dependencies in it. To address this, we propose a Fourier-based approach to extract relevant information in the supervised setting. We first develop a novel Fourier expansion for functions of correlated binary random variables. This expansion is a generalization of the standard Fourier analysis on the Boolean cube beyond product probability spaces. We further extend our Fourier analysis to stochastic mappings. As an important application of this analysis, we investigate learning with feature subset selection. We reformulate this problem in the Fourier domain and introduce a computationally efficient measure for selecting features. Bridging the Bayesian error rate with the Fourier coefficients, we demonstrate that the Fourier expansion provides a powerful tool to characterize nonlinear dependencies in the features-label relation. Via theoretical analysis, we show that our proposed measure finds provably asymptotically optimal feature subsets. 
Lastly, we present an algorithm based on our measure and verify our findings via numerical experiments on various datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/heidari21a/heidari21a.pdf", "supp": "", "pdf_size": 830465, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15080131682719390970&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "NSF Center for Science of Information, Purdue University, West Lafayette, USA; Wadhwani AI, Mumbai, India; Google Inc., Pittsburgh, USA; NSF Center for Science of Information, Purdue University, West Lafayette, USA", "aff_domain": "purdue.edu;wadhwaniai.org; ; ", "email": "purdue.edu;wadhwaniai.org; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/heidari21a.html", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "Purdue University;Wadhwani AI;Google", "aff_unique_dep": "NSF Center for Science of Information;;Google", "aff_unique_url": "https://www.purdue.edu;;https://www.google.com", "aff_unique_abbr": "Purdue;;Google", "aff_campus_unique_index": "0;1;2;0", "aff_campus_unique": "West Lafayette;Mumbai;Pittsburgh", "aff_country_unique_index": "0;1;0;0", "aff_country_unique": "United States;India" }, { "title": "Finding k in Latent $k-$ polytope", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8751", "id": "8751", "proceeding": "http://proceedings.mlr.press/v139/bhattacharyya21a.html", "slides": "", "author_site": "Chiranjib Bhattacharyya, Ravindran Kannan, Amit Kumar", "author": "Chiranjib Bhattacharyya; Ravindran Kannan; Amit Kumar", "abstract": "The recently introduced Latent $k-$ Polytope($\\LkP$) encompasses several stochastic Mixed Membership models including Topic Models. The problem of finding $k$, the number of extreme points of $\\LkP$, is a fundamental challenge and includes several important open problems such as determination of number of components in Ad-mixtures. This paper addresses this challenge by introducing Interpolative Convex Rank(\\INR) of a matrix defined as the minimum number of its columns whose convex hull is within Hausdorff distance $\\varepsilon$ of the convex hull of all columns. The first important contribution of this paper is to show that under \\emph{standard assumptions} $k$ equals the \\INR of a \\emph{subset smoothed data matrix} defined from Data generated from an $\\LkP$. The second important contribution of the paper is a polynomial time algorithm for finding $k$ under standard assumptions. An immediate corollary is the first polynomial time algorithm for finding the \\emph{inner dimension} in Non-negative matrix factorisation(NMF) with assumptions which are qualitatively different than existing ones such as \\emph{Separability}. 
", "bibtex": "@InProceedings{pmlr-v139-bhattacharyya21a,\n title = \t {Finding k in Latent $k-$ polytope},\n author = {Bhattacharyya, Chiranjib and Kannan, Ravindran and Kumar, Amit},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {894--903},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bhattacharyya21a/bhattacharyya21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bhattacharyya21a.html},\n abstract = \t {The recently introduced Latent $k-$ Polytope($\\LkP$) encompasses several stochastic Mixed Membership models including Topic Models. The problem of finding $k$, the number of extreme points of $\\LkP$, is a fundamental challenge and includes several important open problems such as determination of number of components in Ad-mixtures. This paper addresses this challenge by introducing Interpolative Convex Rank(\\INR) of a matrix defined as the minimum number of its columns whose convex hull is within Hausdorff distance $\\varepsilon$ of the convex hull of all columns. The first important contribution of this paper is to show that under \\emph{standard assumptions} $k$ equals the \\INR of a \\emph{subset smoothed data matrix} defined from Data generated from an $\\LkP$. The second important contribution of the paper is a polynomial time algorithm for finding $k$ under standard assumptions. An immediate corollary is the first polynomial time algorithm for finding the \\emph{inner dimension} in Non-negative matrix factorisation(NMF) with assumptions which are qualitatively different than existing ones such as \\emph{Separability}. 
}\n}", "pdf": "http://proceedings.mlr.press/v139/bhattacharyya21a/bhattacharyya21a.pdf", "supp": "", "pdf_size": 356087, "gs_citation": 1, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11784829528188070725&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science and Automation, IISc Bangalore, India; Microsoft Research India Lab., Bangalore, India; Department of Computer Science and Engineering, IIT Delhi, India", "aff_domain": "iisc.ac.in; ; ", "email": "iisc.ac.in; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/bhattacharyya21a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Indian Institute of Science;Microsoft;Indian Institute of Technology Delhi", "aff_unique_dep": "Department of Computer Science and Automation;Microsoft Research;Department of Computer Science and Engineering", "aff_unique_url": "https://www.iisc.ac.in;https://www.microsoft.com/en-us/research/group/microsoft-research-india/;https://www.iitd.ac.in", "aff_unique_abbr": "IISc;MSR India;IIT Delhi", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Bangalore;Delhi", "aff_country_unique_index": "0;0;0", "aff_country_unique": "India" }, { "title": "Finding the Stochastic Shortest Path with Low Regret: the Adversarial Cost and Unknown Transition Case", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10629", "id": "10629", "proceeding": "http://proceedings.mlr.press/v139/chen21l.html", "slides": "/media/icml-2021/Slides/10629.pdf", "author_site": "Liyu Chen, Haipeng Luo", "author": "Liyu Chen; Haipeng Luo", "abstract": "We make significant progress toward the stochastic shortest path problem with adversarial costs and unknown transition. Specifically, we develop algorithms that achieve $O(\\sqrt{S^2ADT_\\star K})$ regret for the full-information setting and $O(\\sqrt{S^3A^2DT_\\star K})$ regret for the bandit feedback setting, where $D$ is the diameter, $T_\\star$ is the expected hitting time of the optimal policy, $S$ is the number of states, $A$ is the number of actions, and $K$ is the number of episodes. Our work strictly improves (Rosenberg and Mansour, 2020) in the full information setting, extends (Chen et al., 2020) from known transition to unknown transition, and is also the first to consider the most challenging combination: bandit feedback with adversarial costs and unknown transition. 
To remedy the gap between our upper bounds and the current best lower bounds constructed via a stochastically oblivious adversary, we also propose algorithms with near-optimal regret for this special case.", "bibtex": "@InProceedings{pmlr-v139-chen21l,\n title = \t {Finding the Stochastic Shortest Path with Low Regret: the Adversarial Cost and Unknown Transition Case},\n author = {Chen, Liyu and Luo, Haipeng},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1651--1660},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21l/chen21l.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21l.html},\n abstract = \t {We make significant progress toward the stochastic shortest path problem with adversarial costs and unknown transition. Specifically, we develop algorithms that achieve $O(\\sqrt{S^2ADT_\\star K})$ regret for the full-information setting and $O(\\sqrt{S^3A^2DT_\\star K})$ regret for the bandit feedback setting, where $D$ is the diameter, $T_\\star$ is the expected hitting time of the optimal policy, $S$ is the number of states, $A$ is the number of actions, and $K$ is the number of episodes. Our work strictly improves (Rosenberg and Mansour, 2020) in the full information setting, extends (Chen et al., 2020) from known transition to unknown transition, and is also the first to consider the most challenging combination: bandit feedback with adversarial costs and unknown transition. To remedy the gap between our upper bounds and the current best lower bounds constructed via a stochastically oblivious adversary, we also propose algorithms with near-optimal regret for this special case.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21l/chen21l.pdf", "supp": "", "pdf_size": 432137, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2428125720194190176&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "University of Southern California; University of Southern California", "aff_domain": "usc.edu; ", "email": "usc.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/chen21l.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Southern California", "aff_unique_dep": "", "aff_unique_url": "https://www.usc.edu", "aff_unique_abbr": "USC", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Los Angeles", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Finite mixture models do not reliably learn the number of components", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10705", "id": "10705", "proceeding": "http://proceedings.mlr.press/v139/cai21a.html", "slides": "", "author_site": "Diana Cai, Trevor Campbell, Tamara Broderick", "author": "Diana Cai; Trevor Campbell; Tamara Broderick", "abstract": "Scientists and engineers are often interested in learning the number of subpopulations (or components) present in a data set. A common suggestion is to use a finite mixture model (FMM) with a prior on the number of components. Past work has shown the resulting FMM component-count posterior is consistent; that is, the posterior concentrates on the true, generating number of components. 
But consistency requires the assumption that the component likelihoods are perfectly specified, which is unrealistic in practice. In this paper, we add rigor to data-analysis folk wisdom by proving that under even the slightest model misspecification, the FMM component-count posterior diverges: the posterior probability of any particular finite number of components converges to 0 in the limit of infinite data. Contrary to intuition, posterior-density consistency is not sufficient to establish this result. We develop novel sufficient conditions that are more realistic and easily checkable than those common in the asymptotics literature. We illustrate practical consequences of our theory on simulated and real data.", "bibtex": "@InProceedings{pmlr-v139-cai21a,\n title = \t {Finite mixture models do not reliably learn the number of components},\n author = {Cai, Diana and Campbell, Trevor and Broderick, Tamara},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1158--1169},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cai21a/cai21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/cai21a.html},\n abstract = \t {Scientists and engineers are often interested in learning the number of subpopulations (or components) present in a data set. A common suggestion is to use a finite mixture model (FMM) with a prior on the number of components. Past work has shown the resulting FMM component-count posterior is consistent; that is, the posterior concentrates on the true, generating number of components. But consistency requires the assumption that the component likelihoods are perfectly specified, which is unrealistic in practice. In this paper, we add rigor to data-analysis folk wisdom by proving that under even the slightest model misspecification, the FMM component-count posterior diverges: the posterior probability of any particular finite number of components converges to 0 in the limit of infinite data. Contrary to intuition, posterior-density consistency is not sufficient to establish this result. We develop novel sufficient conditions that are more realistic and easily checkable than those common in the asymptotics literature. 
We illustrate practical consequences of our theory on simulated and real data.}\n}", "pdf": "http://proceedings.mlr.press/v139/cai21a/cai21a.pdf", "supp": "", "pdf_size": 1631685, "gs_citation": 47, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7957183325639814563&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science, Princeton University; Department of Statistics, University of British Columbia; CSAIL, Massachusetts Institute of Technology", "aff_domain": "cs.princeton.edu;stat.ubc.ca;csail.mit.edu", "email": "cs.princeton.edu;stat.ubc.ca;csail.mit.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/cai21a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Princeton University;University of British Columbia;Massachusetts Institute of Technology", "aff_unique_dep": "Department of Computer Science;Department of Statistics;Computer Science and Artificial Intelligence Laboratory", "aff_unique_url": "https://www.princeton.edu;https://www.ubc.ca;https://www.csail.mit.edu", "aff_unique_abbr": "Princeton;UBC;MIT", "aff_campus_unique_index": "1;2", "aff_campus_unique": ";Vancouver;Cambridge", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United States;Canada" }, { "title": "Finite-Sample Analysis of Off-Policy Natural Actor-Critic Algorithm", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10627", "id": "10627", "proceeding": "http://proceedings.mlr.press/v139/khodadadian21a.html", "slides": "", "author_site": "sajad khodadadian, Zaiwei Chen, Siva Maguluri", "author": "Sajad Khodadadian; Zaiwei Chen; Siva Theja Maguluri", "abstract": "In this paper, we provide finite-sample convergence guarantees for an off-policy variant of the natural actor-critic (NAC) algorithm based on Importance Sampling. In particular, we show that the algorithm converges to a global optimal policy with a sample complexity of $\\mathcal{O}(\\epsilon^{-3}\\log^2(1/\\epsilon))$ under an appropriate choice of stepsizes. In order to overcome the issue of large variance due to Importance Sampling, we propose the $Q$-trace algorithm for the critic, which is inspired by the V-trace algorithm (Espeholt et al., 2018). This enables us to explicitly control the bias and variance, and characterize the trade-off between them. As an advantage of off-policy sampling, a major feature of our result is that we do not need any additional assumptions, beyond the ergodicity of the Markov chain induced by the behavior policy.", "bibtex": "@InProceedings{pmlr-v139-khodadadian21a,\n title = \t {Finite-Sample Analysis of Off-Policy Natural Actor-Critic Algorithm},\n author = {Khodadadian, Sajad and Chen, Zaiwei and Maguluri, Siva Theja},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5420--5431},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/khodadadian21a/khodadadian21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/khodadadian21a.html},\n abstract = \t {In this paper, we provide finite-sample convergence guarantees for an off-policy variant of the natural actor-critic (NAC) algorithm based on Importance Sampling. 
In particular, we show that the algorithm converges to a global optimal policy with a sample complexity of $\\mathcal{O}(\\epsilon^{-3}\\log^2(1/\\epsilon))$ under an appropriate choice of stepsizes. In order to overcome the issue of large variance due to Importance Sampling, we propose the $Q$-trace algorithm for the critic, which is inspired by the V-trace algorithm (Espeholt et al., 2018). This enables us to explicitly control the bias and variance, and characterize the trade-off between them. As an advantage of off-policy sampling, a major feature of our result is that we do not need any additional assumptions, beyond the ergodicity of the Markov chain induced by the behavior policy.}\n}", "pdf": "http://proceedings.mlr.press/v139/khodadadian21a/khodadadian21a.pdf", "supp": "", "pdf_size": 520381, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1184983790792272144&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "School of Industrial and Systems Engineering, Georgia Institute of Technology, Atlanta, GA, 30332, USA+PhD Program in Machine Learning, Georgia Institute of Technology, Atlanta, GA, 30332, USA; PhD Program in Machine Learning, Georgia Institute of Technology, Atlanta, GA, 30332, USA; School of Industrial and Systems Engineering, Georgia Institute of Technology, Atlanta, GA, 30332, USA", "aff_domain": "gatech.edu;gatech.edu; ", "email": "gatech.edu;gatech.edu; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/khodadadian21a.html", "aff_unique_index": "0+0;0;0", "aff_unique_norm": "Georgia Institute of Technology", "aff_unique_dep": "School of Industrial and Systems Engineering", "aff_unique_url": "https://www.gatech.edu", "aff_unique_abbr": "Georgia Tech", "aff_campus_unique_index": "0+0;0;0", "aff_campus_unique": "Atlanta", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "United States" }, { "title": "First-Order Methods for Wasserstein Distributionally Robust MDP", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8837", "id": "8837", "proceeding": "http://proceedings.mlr.press/v139/clement21a.html", "slides": "/media/icml-2021/Slides/8837.pdf", "author_site": "Julien Grand-Clement, Christian Kroer", "author": "Julien Grand Clement; Christian Kroer", "abstract": "Markov decision processes (MDPs) are known to be sensitive to parameter specification. Distributionally robust MDPs alleviate this issue by allowing for \\textit{ambiguity sets} which give a set of possible distributions over parameter sets. The goal is to find an optimal policy with respect to the worst-case parameter distribution. We propose a framework for solving Distributionally robust MDPs via first-order methods, and instantiate it for several types of Wasserstein ambiguity sets. By developing efficient proximal updates, our algorithms achieve a convergence rate of $O\\left(NA^{2.5}S^{3.5}\\log(S)\\log(\\epsilon^{-1})\\epsilon^{-1.5} \\right)$ for the number of kernels $N$ in the support of the nominal distribution, states $S$, and actions $A$; this rate varies slightly based on the Wasserstein setup. Our dependence on $N,A$ and $S$ is significantly better than existing methods, which have a complexity of $O\\left(N^{3.5}A^{3.5}S^{4.5}\\log^{2}(\\epsilon^{-1}) \\right)$. 
Numerical experiments show that our algorithm is significantly more scalable than state-of-the-art approaches across several domains.", "bibtex": "@InProceedings{pmlr-v139-clement21a,\n title = \t {First-Order Methods for Wasserstein Distributionally Robust MDP},\n author = {Clement, Julien Grand and Kroer, Christian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2010--2019},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/clement21a/clement21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/clement21a.html},\n abstract = \t {Markov decision processes (MDPs) are known to be sensitive to parameter specification. Distributionally robust MDPs alleviate this issue by allowing for \\textit{ambiguity sets} which give a set of possible distributions over parameter sets. The goal is to find an optimal policy with respect to the worst-case parameter distribution. We propose a framework for solving Distributionally robust MDPs via first-order methods, and instantiate it for several types of Wasserstein ambiguity sets. By developing efficient proximal updates, our algorithms achieve a convergence rate of $O\\left(NA^{2.5}S^{3.5}\\log(S)\\log(\\epsilon^{-1})\\epsilon^{-1.5} \\right)$ for the number of kernels $N$ in the support of the nominal distribution, states $S$, and actions $A$; this rate varies slightly based on the Wasserstein setup. Our dependence on $N,A$ and $S$ is significantly better than existing methods, which have a complexity of $O\\left(N^{3.5}A^{3.5}S^{4.5}\\log^{2}(\\epsilon^{-1}) \\right)$. Numerical experiments show that our algorithm is significantly more scalable than state-of-the-art approaches across several domains.}\n}", "pdf": "http://proceedings.mlr.press/v139/clement21a/clement21a.pdf", "supp": "", "pdf_size": 2626129, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2201901260493285879&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "IEOR Department, Columbia University; IEOR Department, Columbia University", "aff_domain": "columbia.edu; ", "email": "columbia.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/clement21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Columbia University", "aff_unique_dep": "IEOR Department", "aff_unique_url": "https://www.columbia.edu", "aff_unique_abbr": "Columbia", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Fixed-Parameter and Approximation Algorithms for PCA with Outliers", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9215", "id": "9215", "proceeding": "http://proceedings.mlr.press/v139/dahiya21b.html", "slides": "", "author_site": "Yogesh Dahiya, Fedor Fomin, Fahad Panolan, Kirill Simonov", "author": "Yogesh Dahiya; Fedor Fomin; Fahad Panolan; Kirill Simonov", "abstract": "PCA with Outliers is the fundamental problem of identifying an underlying low-dimensional subspace in a data set corrupted with outliers. A large body of work is devoted to the information-theoretic aspects of this problem. However, from the computational perspective, its complexity is still not well-understood. 
We study this problem from the perspective of parameterized complexity by investigating how parameters like the dimension of the data, the subspace dimension, the number of outliers and their structure, and approximation error, influence the computational complexity of the problem. Our algorithmic methods are based on techniques of randomized linear algebra and algebraic geometry.", "bibtex": "@InProceedings{pmlr-v139-dahiya21b,\n title = \t {Fixed-Parameter and Approximation Algorithms for PCA with Outliers},\n author = {Dahiya, Yogesh and Fomin, Fedor and Panolan, Fahad and Simonov, Kirill},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2341--2351},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/dahiya21b/dahiya21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/dahiya21b.html},\n abstract = \t {PCA with Outliers is the fundamental problem of identifying an underlying low-dimensional subspace in a data set corrupted with outliers. A large body of work is devoted to the information-theoretic aspects of this problem. However, from the computational perspective, its complexity is still not well-understood. We study this problem from the perspective of parameterized complexity by investigating how parameters like the dimension of the data, the subspace dimension, the number of outliers and their structure, and approximation error, influence the computational complexity of the problem. Our algorithmic methods are based on techniques of randomized linear algebra and algebraic geometry.}\n}", "pdf": "http://proceedings.mlr.press/v139/dahiya21b/dahiya21b.pdf", "supp": "", "pdf_size": 448299, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11895601089328431330&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "The Institute of Mathematical Sciences (HBNI), Chennai, India; Department of Informatics, University of Bergen, Norway; Department of Computer Science and Engineering, IIT Hyderabad, Hyderabad, Telangana, India; Department of Informatics, University of Bergen, Norway", "aff_domain": "imsc.res.in; ; ;gmail.com", "email": "imsc.res.in; ; ;gmail.com", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/dahiya21b.html", "aff_unique_index": "0;1;2;1", "aff_unique_norm": "Institute of Mathematical Sciences;University of Bergen;IIT Hyderabad", "aff_unique_dep": ";Department of Informatics;Department of Computer Science and Engineering", "aff_unique_url": ";https://www.uib.no;https://www.iith.ac.in", "aff_unique_abbr": "HBNI;;IIT Hyderabad", "aff_campus_unique_index": "0;2", "aff_campus_unique": "Chennai;;Hyderabad", "aff_country_unique_index": "0;1;0;1", "aff_country_unique": "India;Norway" }, { "title": "Flow-based Attribution in Graphical Models: A Recursive Shapley Approach", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10049", "id": "10049", "proceeding": "http://proceedings.mlr.press/v139/singal21a.html", "slides": "/media/icml-2021/Slides/10049.pdf", "author_site": "Raghav Singal, George Michailidis, Hoiyi Ng", "author": "Raghav Singal; George Michailidis; Hoiyi Ng", "abstract": "We study the attribution problem in a graphical model, wherein the objective is to quantify how the effect of changes at the source nodes 
propagates through the graph. We develop a model-agnostic flow-based attribution method, called recursive Shapley value (RSV). RSV generalizes a number of existing node-based methods and uniquely satisfies a set of flow-based axioms. In addition to admitting a natural characterization for linear models and facilitating mediation analysis for non-linear models, RSV satisfies a mix of desirable properties discussed in the recent literature, including implementation invariance, sensitivity, monotonicity, and affine scale invariance.", "bibtex": "@InProceedings{pmlr-v139-singal21a,\n title = \t {Flow-based Attribution in Graphical Models: A Recursive Shapley Approach},\n author = {Singal, Raghav and Michailidis, George and Ng, Hoiyi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9733--9743},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/singal21a/singal21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/singal21a.html},\n abstract = \t {We study the attribution problem in a graphical model, wherein the objective is to quantify how the effect of changes at the source nodes propagates through the graph. We develop a model-agnostic flow-based attribution method, called recursive Shapley value (RSV). RSV generalizes a number of existing node-based methods and uniquely satisfies a set of flow-based axioms. In addition to admitting a natural characterization for linear models and facilitating mediation analysis for non-linear models, RSV satisfies a mix of desirable properties discussed in the recent literature, including implementation invariance, sensitivity, monotonicity, and affine scale invariance.}\n}", "pdf": "http://proceedings.mlr.press/v139/singal21a/singal21a.pdf", "supp": "", "pdf_size": 344903, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4932659550612858749&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Amazon; University of Florida + Amazon; Amazon", "aff_domain": "columbia.edu;ufl.edu;amazon.com", "email": "columbia.edu;ufl.edu;amazon.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/singal21a.html", "aff_unique_index": "0;1+0;0", "aff_unique_norm": "Amazon;University of Florida", "aff_unique_dep": "Amazon.com, Inc.;", "aff_unique_url": "https://www.amazon.com;https://www.ufl.edu", "aff_unique_abbr": "Amazon;UF", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0+0;0", "aff_country_unique": "United States" }, { "title": "Fold2Seq: A Joint Sequence(1D)-Fold(3D) Embedding-based Generative Model for Protein Design", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9593", "id": "9593", "proceeding": "http://proceedings.mlr.press/v139/cao21a.html", "slides": "", "author_site": "yue cao, Payel Das, Vijil Chenthamarakshan, Pin-Yu Chen, Igor Melnyk, Yang Shen", "author": "Yue Cao; Payel Das; Vijil Chenthamarakshan; Pin-Yu Chen; Igor Melnyk; Yang Shen", "abstract": "Designing novel protein sequences for a desired 3D topological fold is a fundamental yet non-trivial task in protein engineering. 
Challenges exist due to the complex sequence\u2013fold relationship, as well as the difficulties to capture the diversity of the sequences (therefore structures and functions) within a fold. To overcome these challenges, we propose Fold2Seq, a novel transformer-based generative framework for designing protein sequences conditioned on a specific target fold. To model the complex sequence\u2013structure relationship, Fold2Seq jointly learns a sequence embedding using a transformer and a fold embedding from the density of secondary structural elements in 3D voxels. On test sets with single, high-resolution and complete structure inputs for individual folds, our experiments demonstrate improved or comparable performance of Fold2Seq in terms of speed, coverage, and reliability for sequence design, when compared to existing state-of-the-art methods that include data-driven deep generative models and physics-based RosettaDesign. The unique advantages of fold-based Fold2Seq, in comparison to a structure-based deep model and RosettaDesign, become more evident on three additional real-world challenges originating from low-quality, incomplete, or ambiguous input structures. Source code and data are available at https://github.com/IBM/fold2seq.", "bibtex": "@InProceedings{pmlr-v139-cao21a,\n title = \t {Fold2Seq: A Joint Sequence(1D)-Fold(3D) Embedding-based Generative Model for Protein Design},\n author = {Cao, Yue and Das, Payel and Chenthamarakshan, Vijil and Chen, Pin-Yu and Melnyk, Igor and Shen, Yang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1261--1271},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cao21a/cao21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/cao21a.html},\n abstract = \t {Designing novel protein sequences for a desired 3D topological fold is a fundamental yet non-trivial task in protein engineering. Challenges exist due to the complex sequence\u2013fold relationship, as well as the difficulties to capture the diversity of the sequences (therefore structures and functions) within a fold. To overcome these challenges, we propose Fold2Seq, a novel transformer-based generative framework for designing protein sequences conditioned on a specific target fold. To model the complex sequence\u2013structure relationship, Fold2Seq jointly learns a sequence embedding using a transformer and a fold embedding from the density of secondary structural elements in 3D voxels. On test sets with single, high-resolution and complete structure inputs for individual folds, our experiments demonstrate improved or comparable performance of Fold2Seq in terms of speed, coverage, and reliability for sequence design, when compared to existing state-of-the-art methods that include data-driven deep generative models and physics-based RosettaDesign. The unique advantages of fold-based Fold2Seq, in comparison to a structure-based deep model and RosettaDesign, become more evident on three additional real-world challenges originating from low-quality, incomplete, or ambiguous input structures. 
Source code and data are available at https://github.com/IBM/fold2seq.}\n}", "pdf": "http://proceedings.mlr.press/v139/cao21a/cao21a.pdf", "supp": "", "pdf_size": 2457540, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9442126458531954169&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "IBM Research+Texas A&M University; IBM Research; IBM Research; IBM Research; IBM Research; Texas A&M University", "aff_domain": "tamu.edu;us.ibm.com;us.ibm.com;ibm.com;ibm.com;tamu.edu", "email": "tamu.edu;us.ibm.com;us.ibm.com;ibm.com;ibm.com;tamu.edu", "github": "https://github.com/IBM/fold2seq", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/cao21a.html", "aff_unique_index": "0+1;0;0;0;0;1", "aff_unique_norm": "IBM;Texas A&M University", "aff_unique_dep": "IBM Research;", "aff_unique_url": "https://www.ibm.com/research;https://www.tamu.edu", "aff_unique_abbr": "IBM;TAMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Follow-the-Regularized-Leader Routes to Chaos in Routing Games", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9621", "id": "9621", "proceeding": "http://proceedings.mlr.press/v139/bielawski21a.html", "slides": "", "author_site": "Jakub Bielawski, Thiparat Chotibut, Fryderyk Falniowski, Grzegorz Kosiorowski, Micha\u0142 Misiurewicz, Georgios Piliouras", "author": "Jakub Bielawski; Thiparat Chotibut; Fryderyk Falniowski; Grzegorz Kosiorowski; Micha\u0142 Misiurewicz; Georgios Piliouras", "abstract": "We study the emergence of chaotic behavior of Follow-the-Regularized Leader (FoReL) dynamics in games. We focus on the effects of increasing the population size or the scale of costs in congestion games, and generalize recent results on unstable, chaotic behaviors in the Multiplicative Weights Update dynamics to a much larger class of FoReL dynamics. We establish that, even in simple linear non-atomic congestion games with two parallel links and \\emph{any} fixed learning rate, unless the game is fully symmetric, increasing the population size or the scale of costs causes learning dynamics to becomes unstable and eventually chaotic, in the sense of Li-Yorke and positive topological entropy. Furthermore, we prove the existence of novel non-standard phenomena such as the coexistence of stable Nash equilibria and chaos in the same game. We also observe the simultaneous creation of a chaotic attractor as another chaotic attractor gets destroyed. 
Lastly, although FoReL dynamics can be strange and non-equilibrating, we prove that the time average still converges to an \\emph{exact} equilibrium for any choice of learning rate and any scale of costs.", "bibtex": "@InProceedings{pmlr-v139-bielawski21a,\n title = \t {Follow-the-Regularized-Leader Routes to Chaos in Routing Games},\n author = {Bielawski, Jakub and Chotibut, Thiparat and Falniowski, Fryderyk and Kosiorowski, Grzegorz and Misiurewicz, Micha{\\l} and Piliouras, Georgios},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {925--935},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bielawski21a/bielawski21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bielawski21a.html},\n abstract = \t {We study the emergence of chaotic behavior of Follow-the-Regularized Leader (FoReL) dynamics in games. We focus on the effects of increasing the population size or the scale of costs in congestion games, and generalize recent results on unstable, chaotic behaviors in the Multiplicative Weights Update dynamics to a much larger class of FoReL dynamics. We establish that, even in simple linear non-atomic congestion games with two parallel links and \\emph{any} fixed learning rate, unless the game is fully symmetric, increasing the population size or the scale of costs causes learning dynamics to becomes unstable and eventually chaotic, in the sense of Li-Yorke and positive topological entropy. Furthermore, we prove the existence of novel non-standard phenomena such as the coexistence of stable Nash equilibria and chaos in the same game. We also observe the simultaneous creation of a chaotic attractor as another chaotic attractor gets destroyed. 
Lastly, although FoReL dynamics can be strange and non-equilibrating, we prove that the time average still converges to an \\emph{exact} equilibrium for any choice of learning rate and any scale of costs.}\n}", "pdf": "http://proceedings.mlr.press/v139/bielawski21a/bielawski21a.pdf", "supp": "", "pdf_size": 2232341, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13194391057511787447&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Mathematics, Cracow University of Economics; Chula Intelligent and Complex Systems, and Department of Physics, Faculty of Science, Chulalongkorn University; Department of Mathematics, Cracow University of Economics; Department of Mathematics, Cracow University of Economics; Department of Mathematical Sciences, Indiana University-Purdue University Indianapolis; Engineering Systems and Design, Singapore University of Technology and Design", "aff_domain": "sutd.edu.sg;chula.ac.th; ; ; ;", "email": "sutd.edu.sg;chula.ac.th; ; ; ;", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/bielawski21a.html", "aff_unique_index": "0;1;0;0;2;3", "aff_unique_norm": "Cracow University of Economics;Chulalongkorn University;Indiana University-Purdue University Indianapolis;Singapore University of Technology and Design", "aff_unique_dep": "Department of Mathematics;Department of Physics;Department of Mathematical Sciences;Engineering Systems and Design", "aff_unique_url": "https://www.uek.krakow.pl;https://www.chula.ac.th;https://www.iupui.edu;https://www.sutd.edu.sg", "aff_unique_abbr": "UEK;Chula;IUPUI;SUTD", "aff_campus_unique_index": "0;0;0;2", "aff_campus_unique": "Cracow;;Indianapolis", "aff_country_unique_index": "0;1;0;0;2;3", "aff_country_unique": "Poland;Thailand;United States;Singapore" }, { "title": "From Local Structures to Size Generalization in Graph Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10719", "id": "10719", "proceeding": "http://proceedings.mlr.press/v139/yehudai21a.html", "slides": "", "author_site": "Gilad Yehudai, Ethan Fetaya, Eli Meirom, Gal Chechik, Haggai Maron", "author": "Gilad Yehudai; Ethan Fetaya; Eli Meirom; Gal Chechik; Haggai Maron", "abstract": "Graph neural networks (GNNs) can process graphs of different sizes, but their ability to generalize across sizes, specifically from small to large graphs, is still not well understood. In this paper, we identify an important type of data where generalization from small to large graphs is challenging: graph distributions for which the local structure depends on the graph size. This effect occurs in multiple important graph learning domains, including social and biological networks. We first prove that when there is a difference between the local structures, GNNs are not guaranteed to generalize across sizes: there are \"bad\" global minima that do well on small graphs but fail on large graphs. We then study the size-generalization problem empirically and demonstrate that when there is a discrepancy in local structure, GNNs tend to converge to non-generalizing solutions. Finally, we suggest two approaches for improving size generalization, motivated by our findings. Notably, we propose a novel Self-Supervised Learning (SSL) task aimed at learning meaningful representations of local structures that appear in large graphs. 
Our SSL task improves classification accuracy on several popular datasets.", "bibtex": "@InProceedings{pmlr-v139-yehudai21a,\n title = \t {From Local Structures to Size Generalization in Graph Neural Networks},\n author = {Yehudai, Gilad and Fetaya, Ethan and Meirom, Eli and Chechik, Gal and Maron, Haggai},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11975--11986},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yehudai21a/yehudai21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yehudai21a.html},\n abstract = \t {Graph neural networks (GNNs) can process graphs of different sizes, but their ability to generalize across sizes, specifically from small to large graphs, is still not well understood. In this paper, we identify an important type of data where generalization from small to large graphs is challenging: graph distributions for which the local structure depends on the graph size. This effect occurs in multiple important graph learning domains, including social and biological networks. We first prove that when there is a difference between the local structures, GNNs are not guaranteed to generalize across sizes: there are \"bad\" global minima that do well on small graphs but fail on large graphs. We then study the size-generalization problem empirically and demonstrate that when there is a discrepancy in local structure, GNNs tend to converge to non-generalizing solutions. Finally, we suggest two approaches for improving size generalization, motivated by our findings. Notably, we propose a novel Self-Supervised Learning (SSL) task aimed at learning meaningful representations of local structures that appear in large graphs. 
Our SSL task improves classification accuracy on several popular datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/yehudai21a/yehudai21a.pdf", "supp": "", "pdf_size": 990833, "gs_citation": 161, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10829209358390562217&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "NVIDIA; Bar-Ilan University; NVIDIA; NVIDIA + Bar-Ilan University; NVIDIA", "aff_domain": "weizmann.ac.il; ; ; ; ", "email": "weizmann.ac.il; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/yehudai21a.html", "aff_unique_index": "0;1;0;0+1;0", "aff_unique_norm": "NVIDIA;Bar-Ilan University", "aff_unique_dep": "NVIDIA Corporation;", "aff_unique_url": "https://www.nvidia.com;https://www.biu.ac.il", "aff_unique_abbr": "NVIDIA;BIU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0;0+1;0", "aff_country_unique": "United States;Israel" }, { "title": "From Local to Global Norm Emergence: Dissolving Self-reinforcing Substructures with Incremental Social Instruments", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10711", "id": "10711", "proceeding": "http://proceedings.mlr.press/v139/liu21n.html", "slides": "/media/icml-2021/Slides/10711.pdf", "author_site": "Yiwei Liu, Jiamou Liu, Kaibin Wan, Zhan Qin, Zijian Zhang, Bakhadyr Khoussainov, Liehuang Zhu", "author": "Yiwei Liu; Jiamou Liu; Kaibin Wan; Zhan Qin; Zijian Zhang; Bakhadyr Khoussainov; Liehuang Zhu", "abstract": "Norm emergence is a process where agents in a multi-agent system establish self-enforcing conformity through repeated interactions. When such interactions are confined to a social topology, several self-reinforcing substructures (SRS) may emerge within the population. This prevents a formation of a global norm. We propose incremental social instruments (ISI) to dissolve these SRSs by creating ties between agents. Establishing ties requires some effort and cost. Hence, it is worth to design methods that build a small number of ties yet dissolve the SRSs. By using the notion of information entropy, we propose an indicator called the BA-ratio that measures the current SRSs. We find that by building ties with minimal BA-ratio, our ISI is effective in facilitating the global norm emergence. We explain this through our experiments and theoretical results. Furthermore, we propose the small-degree principle in minimising the BA-ratio that helps us to design efficient ISI algorithms for finding the optimal ties. 
Experiments on both synthetic and real-world network topologies demonstrate that our adaptive ISI is efficient at dissolving SRS.", "bibtex": "@InProceedings{pmlr-v139-liu21n,\n title = \t {From Local to Global Norm Emergence: Dissolving Self-reinforcing Substructures with Incremental Social Instruments},\n author = {Liu, Yiwei and Liu, Jiamou and Wan, Kaibin and Qin, Zhan and Zhang, Zijian and Khoussainov, Bakhadyr and Zhu, Liehuang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6871--6881},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21n/liu21n.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21n.html},\n abstract = \t {Norm emergence is a process where agents in a multi-agent system establish self-enforcing conformity through repeated interactions. When such interactions are confined to a social topology, several self-reinforcing substructures (SRS) may emerge within the population. This prevents a formation of a global norm. We propose incremental social instruments (ISI) to dissolve these SRSs by creating ties between agents. Establishing ties requires some effort and cost. Hence, it is worth to design methods that build a small number of ties yet dissolve the SRSs. By using the notion of information entropy, we propose an indicator called the BA-ratio that measures the current SRSs. We find that by building ties with minimal BA-ratio, our ISI is effective in facilitating the global norm emergence. We explain this through our experiments and theoretical results. Furthermore, we propose the small-degree principle in minimising the BA-ratio that helps us to design efficient ISI algorithms for finding the optimal ties. Experiments on both synthetic and real-world network topologies demonstrate that our adaptive ISI is efficient at dissolving SRS.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21n/liu21n.pdf", "supp": "", "pdf_size": 2393987, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10642908751326510098&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": ";;;;;;", "aff_domain": ";;;;;;", "email": ";;;;;;", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/liu21n.html" }, { "title": "From Poincar\u00e9 Recurrence to Convergence in Imperfect Information Games: Finding Equilibrium via Regularization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9903", "id": "9903", "proceeding": "http://proceedings.mlr.press/v139/perolat21a.html", "slides": "/media/icml-2021/Slides/9903_SQBiPqQ.pdf", "author_site": "Julien Perolat, Remi Munos, Jean-Baptiste Lespiau, Shayegan Omidshafiei, Mark Rowland, Pedro Ortega, Neil Burch, Thomas Anthony, David Balduzzi, Bart De Vylder, Georgios Piliouras, Marc Lanctot, Karl Tuyls", "author": "Julien Perolat; Remi Munos; Jean-Baptiste Lespiau; Shayegan Omidshafiei; Mark Rowland; Pedro Ortega; Neil Burch; Thomas Anthony; David Balduzzi; Bart De Vylder; Georgios Piliouras; Marc Lanctot; Karl Tuyls", "abstract": "In this paper we investigate the Follow the Regularized Leader dynamics in sequential imperfect information games (IIG). 
We generalize existing results of Poincar{\u00e9} recurrence from normal-form games to zero-sum two-player imperfect information games and other sequential game settings. We then investigate how adapting the reward (by adding a regularization term) of the game can give strong convergence guarantees in monotone games. We continue by showing how this reward adaptation technique can be leveraged to build algorithms that converge exactly to the Nash equilibrium. Finally, we show how these insights can be directly used to build state-of-the-art model-free algorithms for zero-sum two-player Imperfect Information Games (IIG).", "bibtex": "@InProceedings{pmlr-v139-perolat21a,\n title = \t {From Poincar{\u00e9} Recurrence to Convergence in Imperfect Information Games: Finding Equilibrium via Regularization},\n author = {Perolat, Julien and Munos, Remi and Lespiau, Jean-Baptiste and Omidshafiei, Shayegan and Rowland, Mark and Ortega, Pedro and Burch, Neil and Anthony, Thomas and Balduzzi, David and De Vylder, Bart and Piliouras, Georgios and Lanctot, Marc and Tuyls, Karl},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8525--8535},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/perolat21a/perolat21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/perolat21a.html},\n abstract = \t {In this paper we investigate the Follow the Regularized Leader dynamics in sequential imperfect information games (IIG). We generalize existing results of Poincar{\u00e9} recurrence from normal-form games to zero-sum two-player imperfect information games and other sequential game settings. We then investigate how adapting the reward (by adding a regularization term) of the game can give strong convergence guarantees in monotone games. We continue by showing how this reward adaptation technique can be leveraged to build algorithms that converge exactly to the Nash equilibrium. 
Finally, we show how these insights can be directly used to build state-of-the-art model-free algorithms for zero-sum two-player Imperfect Information Games (IIG).}\n}", "pdf": "http://proceedings.mlr.press/v139/perolat21a/perolat21a.pdf", "supp": "", "pdf_size": 730237, "gs_citation": 105, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13313801091973604516&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; SUTD; DeepMind; DeepMind", "aff_domain": "google.com; ; ; ; ; ; ; ; ; ; ; ; ", "email": "google.com; ; ; ; ; ; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 13, "oa": "https://proceedings.mlr.press/v139/perolat21a.html", "aff_unique_index": "0;0;0;0;0;0;0;0;0;0;1;0;0", "aff_unique_norm": "DeepMind;Singapore University of Technology and Design", "aff_unique_dep": ";", "aff_unique_url": "https://deepmind.com;https://www.sutd.edu.sg", "aff_unique_abbr": "DeepMind;SUTD", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;1;0;0", "aff_country_unique": "United Kingdom;Singapore" }, { "title": "Function Contrastive Learning of Transferable Meta-Representations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10763", "id": "10763", "proceeding": "http://proceedings.mlr.press/v139/gondal21a.html", "slides": "", "author_site": "Muhammad Waleed Gondal, Shruti Joshi, Nasim Rahaman, Stefan Bauer, Manuel Wuthrich, Bernhard Sch\u00f6lkopf", "author": "Muhammad Waleed Gondal; Shruti Joshi; Nasim Rahaman; Stefan Bauer; Manuel Wuthrich; Bernhard Sch\u00f6lkopf", "abstract": "Meta-learning algorithms adapt quickly to new tasks that are drawn from the same task distribution as the training tasks. The mechanism leading to fast adaptation is the conditioning of a downstream predictive model on the inferred representation of the task\u2019s underlying data generative process, or \\emph{function}. This \\emph{meta-representation}, which is computed from a few observed examples of the underlying function, is learned jointly with the predictive model. In this work, we study the implications of this joint training on the transferability of the meta-representations. Our goal is to learn meta-representations that are robust to noise in the data and facilitate solving a wide range of downstream tasks that share the same underlying functions. To this end, we propose a decoupled encoder-decoder approach to supervised meta-learning, where the encoder is trained with a contrastive objective to find a good representation of the underlying function. In particular, our training scheme is driven by the self-supervision signal indicating whether two sets of examples stem from the same function. 
Our experiments on a number of synthetic and real-world datasets show that the representations we obtain outperform strong baselines in terms of downstream performance and noise robustness, even when these baselines are trained in an end-to-end manner.", "bibtex": "@InProceedings{pmlr-v139-gondal21a,\n title = \t {Function Contrastive Learning of Transferable Meta-Representations},\n author = {Gondal, Muhammad Waleed and Joshi, Shruti and Rahaman, Nasim and Bauer, Stefan and Wuthrich, Manuel and Sch{\\\"o}lkopf, Bernhard},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3755--3765},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gondal21a/gondal21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/gondal21a.html},\n abstract = \t {Meta-learning algorithms adapt quickly to new tasks that are drawn from the same task distribution as the training tasks. The mechanism leading to fast adaptation is the conditioning of a downstream predictive model on the inferred representation of the task\u2019s underlying data generative process, or \\emph{function}. This \\emph{meta-representation}, which is computed from a few observed examples of the underlying function, is learned jointly with the predictive model. In this work, we study the implications of this joint training on the transferability of the meta-representations. Our goal is to learn meta-representations that are robust to noise in the data and facilitate solving a wide range of downstream tasks that share the same underlying functions. To this end, we propose a decoupled encoder-decoder approach to supervised meta-learning, where the encoder is trained with a contrastive objective to find a good representation of the underlying function. In particular, our training scheme is driven by the self-supervision signal indicating whether two sets of examples stem from the same function. 
Our experiments on a number of synthetic and real-world datasets show that the representations we obtain outperform strong baselines in terms of downstream performance and noise robustness, even when these baselines are trained in an end-to-end manner.}\n}", "pdf": "http://proceedings.mlr.press/v139/gondal21a/gondal21a.pdf", "supp": "", "pdf_size": 3671543, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11694021951703735753&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Max Planck Institute for Intelligent Systems, Tubingen, Germany+Mila, University of Montreal, Montreal, Canada; Max Planck Institute for Intelligent Systems, Tubingen, Germany; Max Planck Institute for Intelligent Systems, Tubingen, Germany+Mila, University of Montreal, Montreal, Canada; Max Planck Institute for Intelligent Systems, Tubingen, Germany+CIFAR Azrieli Global Scholar; Max Planck Institute for Intelligent Systems, Tubingen, Germany; Max Planck Institute for Intelligent Systems, Tubingen, Germany", "aff_domain": "tue.mpg.de; ; ; ; ; ", "email": "tue.mpg.de; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/gondal21a.html", "aff_unique_index": "0+1;0;0+1;0+2;0;0", "aff_unique_norm": "Max Planck Institute for Intelligent Systems;University of Montreal;CIFAR", "aff_unique_dep": ";Mila;Azrieli Global Scholar", "aff_unique_url": "https://www.mpi-is.mpg.de;https://www.mila.quebec;https://www.cifar.ca", "aff_unique_abbr": "MPI-IS;Mila;CIFAR", "aff_campus_unique_index": "0+1;0;0+1;0;0;0", "aff_campus_unique": "Tubingen;Montreal;", "aff_country_unique_index": "0+1;0;0+1;0+1;0;0", "aff_country_unique": "Germany;Canada" }, { "title": "Functional Space Analysis of Local GAN Convergence", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10305", "id": "10305", "proceeding": "http://proceedings.mlr.press/v139/khrulkov21a.html", "slides": "", "author_site": "Valentin Khrulkov, Artem Babenko, Ivan Oseledets", "author": "Valentin Khrulkov; Artem Babenko; Ivan Oseledets", "abstract": "Recent work demonstrated the benefits of studying continuous-time dynamics governing the GAN training. However, this dynamics is analyzed in the model parameter space, which results in finite-dimensional dynamical systems. We propose a novel perspective where we study the local dynamics of adversarial training in the general functional space and show how it can be represented as a system of partial differential equations. Thus, the convergence properties can be inferred from the eigenvalues of the resulting differential operator. We show that these eigenvalues can be efficiently estimated from the target dataset before training. Our perspective reveals several insights on the practical tricks commonly used to stabilize GANs, such as gradient penalty, data augmentation, and advanced integration schemes. 
As an immediate practical benefit, we demonstrate how one can a priori select an optimal data augmentation strategy for a particular generation task.", "bibtex": "@InProceedings{pmlr-v139-khrulkov21a,\n title = \t {Functional Space Analysis of Local GAN Convergence},\n author = {Khrulkov, Valentin and Babenko, Artem and Oseledets, Ivan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5432--5442},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/khrulkov21a/khrulkov21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/khrulkov21a.html},\n abstract = \t {Recent work demonstrated the benefits of studying continuous-time dynamics governing the GAN training. However, this dynamics is analyzed in the model parameter space, which results in finite-dimensional dynamical systems. We propose a novel perspective where we study the local dynamics of adversarial training in the general functional space and show how it can be represented as a system of partial differential equations. Thus, the convergence properties can be inferred from the eigenvalues of the resulting differential operator. We show that these eigenvalues can be efficiently estimated from the target dataset before training. Our perspective reveals several insights on the practical tricks commonly used to stabilize GANs, such as gradient penalty, data augmentation, and advanced integration schemes. As an immediate practical benefit, we demonstrate how one can a priori select an optimal data augmentation strategy for a particular generation task.}\n}", "pdf": "http://proceedings.mlr.press/v139/khrulkov21a/khrulkov21a.pdf", "supp": "", "pdf_size": 5021280, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9573645358381315708&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Yandex, Russia; Yandex, Russia + National Research University Higher School of Economics, Moscow, Russia; Skolkovo Institute of Science and Technology, Moscow, Russia", "aff_domain": "gmail.com; ; ", "email": "gmail.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/khrulkov21a.html", "aff_unique_index": "0;0+1;2", "aff_unique_norm": "Yandex;National Research University Higher School of Economics;Skolkovo Institute of Science and Technology", "aff_unique_dep": ";;", "aff_unique_url": "https://yandex.com;https://www.hse.ru;https://www.skoltech.ru", "aff_unique_abbr": "Yandex;HSE;Skoltech", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Moscow", "aff_country_unique_index": "0;0+0;0", "aff_country_unique": "Russian Federation" }, { "title": "Fundamental Tradeoffs in Distributionally Adversarial Training", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8907", "id": "8907", "proceeding": "http://proceedings.mlr.press/v139/mehrabi21a.html", "slides": "/media/icml-2021/Slides/8907.pdf", "author_site": "Mohammad Mehrabi, Adel Javanmard, Ryan A. Rossi, Anup Rao, Tung Mai", "author": "Mohammad Mehrabi; Adel Javanmard; Ryan A. Rossi; Anup Rao; Tung Mai", "abstract": "Adversarial training is among the most effective techniques to improve robustness of models against adversarial perturbations. However, the full effect of this approach on models is not well understood. 
For example, while adversarial training can reduce the adversarial risk (prediction error against an adversary), it sometimes increase standard risk (generalization error when there is no adversary). In this paper, we focus on \\emph{distribution perturbing} adversary framework wherein the adversary can change the test distribution within a neighborhood of the training data distribution. The neighborhood is defined via Wasserstein distance between distributions and the radius of the neighborhood is a measure of adversary\u2019s manipulative power. We study the tradeoff between standard risk and adversarial risk and derive the Pareto-optimal tradeoff, achievable over specific classes of models, in the infinite data limit with features dimension kept fixed. We consider three learning settings: 1) Regression with the class of linear models; 2) Binary classification under the Gaussian mixtures data model, with the class of linear classifiers; 3) Regression with the class of random features model (which can be equivalently represented as two-layer neural network with random first-layer weights). We show that a tradeoff between standard and adversarial risk is manifested in all three settings. We further characterize the Pareto-optimal tradeoff curves and discuss how a variety of factors, such as features correlation, adversary\u2019s power or the width of two-layer neural network would affect this tradeoff.", "bibtex": "@InProceedings{pmlr-v139-mehrabi21a,\n title = \t {Fundamental Tradeoffs in Distributionally Adversarial Training},\n author = {Mehrabi, Mohammad and Javanmard, Adel and Rossi, Ryan A. and Rao, Anup and Mai, Tung},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7544--7554},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/mehrabi21a/mehrabi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/mehrabi21a.html},\n abstract = \t {Adversarial training is among the most effective techniques to improve robustness of models against adversarial perturbations. However, the full effect of this approach on models is not well understood. For example, while adversarial training can reduce the adversarial risk (prediction error against an adversary), it sometimes increase standard risk (generalization error when there is no adversary). In this paper, we focus on \\emph{distribution perturbing} adversary framework wherein the adversary can change the test distribution within a neighborhood of the training data distribution. The neighborhood is defined via Wasserstein distance between distributions and the radius of the neighborhood is a measure of adversary\u2019s manipulative power. We study the tradeoff between standard risk and adversarial risk and derive the Pareto-optimal tradeoff, achievable over specific classes of models, in the infinite data limit with features dimension kept fixed. We consider three learning settings: 1) Regression with the class of linear models; 2) Binary classification under the Gaussian mixtures data model, with the class of linear classifiers; 3) Regression with the class of random features model (which can be equivalently represented as two-layer neural network with random first-layer weights). We show that a tradeoff between standard and adversarial risk is manifested in all three settings. 
We further characterize the Pareto-optimal tradeoff curves and discuss how a variety of factors, such as features correlation, adversary\u2019s power or the width of two-layer neural network would affect this tradeoff.}\n}", "pdf": "http://proceedings.mlr.press/v139/mehrabi21a/mehrabi21a.pdf", "supp": "", "pdf_size": 765850, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8379987003039055392&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Data Sciences and Operations, USC Marshall School of Business, University of Southern California, USA; Department of Data Sciences and Operations, USC Marshall School of Business, University of Southern California, USA; Adobe Research, USA; Adobe Research, USA; Adobe Research, USA", "aff_domain": "usc.edu;usc.edu;adobe.com;adobe.com;adobe.com", "email": "usc.edu;usc.edu;adobe.com;adobe.com;adobe.com", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/mehrabi21a.html", "aff_unique_index": "0;0;1;1;1", "aff_unique_norm": "University of Southern California;Adobe", "aff_unique_dep": "Department of Data Sciences and Operations;Adobe Research", "aff_unique_url": "https://www.usc.edu;https://research.adobe.com", "aff_unique_abbr": "USC;Adobe", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Los Angeles;", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Fused Acoustic and Text Encoding for Multimodal Bilingual Pretraining and Speech Translation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10225", "id": "10225", "proceeding": "http://proceedings.mlr.press/v139/zheng21a.html", "slides": "", "author_site": "Renjie Zheng, Junkun Chen, Mingbo Ma, Liang Huang", "author": "Renjie Zheng; Junkun Chen; Mingbo Ma; Liang Huang", "abstract": "Recently, representation learning for text and speech has successfully improved many language related tasks. However, all existing methods suffer from two limitations: (a) they only learn from one input modality, while a unified representation for both speech and text is needed by tasks such as end-to-end speech translation, and as a result, (b) they can not exploit various large-scale text and speech data and their performance is limited by the scarcity of parallel speech translation data. To address these problems, we propose a Fused Acoustic and Text Masked Language Model (FAT-MLM) which jointly learns a unified representation for both acoustic and text input from various types of corpora including parallel data for speech recognition and machine translation, and even pure speech and text data. Within this cross-modal representation learning framework, we further present an end-to-end model for Fused Acoustic and Text Speech Translation (FAT-ST). 
Experiments on three translation directions show that by fine-tuning from FAT-MLM, our proposed speech translation models substantially improve translation quality by up to +5.9 BLEU.", "bibtex": "@InProceedings{pmlr-v139-zheng21a,\n title = \t {Fused Acoustic and Text Encoding for Multimodal Bilingual Pretraining and Speech Translation},\n author = {Zheng, Renjie and Chen, Junkun and Ma, Mingbo and Huang, Liang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12736--12746},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zheng21a/zheng21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zheng21a.html},\n abstract = \t {Recently, representation learning for text and speech has successfully improved many language related tasks. However, all existing methods suffer from two limitations: (a) they only learn from one input modality, while a unified representation for both speech and text is needed by tasks such as end-to-end speech translation, and as a result, (b) they can not exploit various large-scale text and speech data and their performance is limited by the scarcity of parallel speech translation data. To address these problems, we propose a Fused Acoustic and Text Masked Language Model (FAT-MLM) which jointly learns a unified representation for both acoustic and text input from various types of corpora including parallel data for speech recognition and machine translation, and even pure speech and text data. Within this cross-modal representation learning framework, we further present an end-to-end model for Fused Acoustic and Text Speech Translation (FAT-ST). 
Experiments on three translation directions show that by fine-tuning from FAT-MLM, our proposed speech translation models substantially improve translation quality by up to +5.9 BLEU.}\n}", "pdf": "http://proceedings.mlr.press/v139/zheng21a/zheng21a.pdf", "supp": "", "pdf_size": 3319720, "gs_citation": 80, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10671660408216056807&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Baidu Research, Sunnyvale, CA, USA+Oregon State University, Corvallis, OR, USA; Baidu Research, Sunnyvale, CA, USA+Oregon State University, Corvallis, OR, USA; Baidu Research, Sunnyvale, CA, USA; Baidu Research, Sunnyvale, CA, USA+Oregon State University, Corvallis, OR, USA", "aff_domain": "baidu.com; ; ; ", "email": "baidu.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/zheng21a.html", "aff_unique_index": "0+1;0+1;0;0+1", "aff_unique_norm": "Baidu;Oregon State University", "aff_unique_dep": "Research;", "aff_unique_url": "https://research.baidu.com;https://oregonstate.edu", "aff_unique_abbr": "Baidu Res.;OSU", "aff_campus_unique_index": "0+1;0+1;0;0+1", "aff_campus_unique": "Sunnyvale;Corvallis", "aff_country_unique_index": "0+0;0+0;0;0+0", "aff_country_unique": "United States" }, { "title": "GANMEX: One-vs-One Attributions using GAN-based Model Explainability", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9015", "id": "9015", "proceeding": "http://proceedings.mlr.press/v139/shih21a.html", "slides": "/media/icml-2021/Slides/9015.pdf", "author_site": "Sheng-Min Shih, Pin-Ju Tien, Zohar Karnin", "author": "Sheng-Min Shih; Pin-Ju Tien; Zohar Karnin", "abstract": "Attribution methods have been shown as promising approaches for identifying key features that led to learned model predictions. While most existing attribution methods rely on a baseline input for performing feature perturbations, limited research has been conducted to address the baseline selection issues. Poor choices of baselines limit the ability of one-vs-one explanations for multi-class classifiers, which means the attribution methods were not able to explain why an input belongs to its original class but not the other specified target class. Achieving one-vs-one explanation is crucial when certain classes are more similar than others, e.g. two bird types among multiple animals, by focusing on key differentiating features rather than shared features across classes. In this paper, we present GANMEX, a novel approach applying Generative Adversarial Networks (GAN) by incorporating the to-be-explained classifier as part of the adversarial networks. Our approach effectively selects the baseline as the closest realistic sample belong to the target class, which allows attribution methods to provide true one-vs-one explanations. We showed that GANMEX baselines improved the saliency maps and led to stronger performance on multiple evaluation metrics over the existing baselines. 
Existing attribution results are known for being insensitive to model randomization, and we demonstrated that GANMEX baselines led to better outcome under the cascading randomization of the model.", "bibtex": "@InProceedings{pmlr-v139-shih21a,\n title = \t {GANMEX: One-vs-One Attributions using GAN-based Model Explainability},\n author = {Shih, Sheng-Min and Tien, Pin-Ju and Karnin, Zohar},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9592--9602},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/shih21a/shih21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/shih21a.html},\n abstract = \t {Attribution methods have been shown as promising approaches for identifying key features that led to learned model predictions. While most existing attribution methods rely on a baseline input for performing feature perturbations, limited research has been conducted to address the baseline selection issues. Poor choices of baselines limit the ability of one-vs-one explanations for multi-class classifiers, which means the attribution methods were not able to explain why an input belongs to its original class but not the other specified target class. Achieving one-vs-one explanation is crucial when certain classes are more similar than others, e.g. two bird types among multiple animals, by focusing on key differentiating features rather than shared features across classes. In this paper, we present GANMEX, a novel approach applying Generative Adversarial Networks (GAN) by incorporating the to-be-explained classifier as part of the adversarial networks. Our approach effectively selects the baseline as the closest realistic sample belong to the target class, which allows attribution methods to provide true one-vs-one explanations. We showed that GANMEX baselines improved the saliency maps and led to stronger performance on multiple evaluation metrics over the existing baselines. 
Existing attribution results are known for being insensitive to model randomization, and we demonstrated that GANMEX baselines led to better outcome under the cascading randomization of the model.}\n}", "pdf": "http://proceedings.mlr.press/v139/shih21a/shih21a.pdf", "supp": "", "pdf_size": 4743373, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15516194186629580208&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Amazon; Amazon; Amazon", "aff_domain": "gmail.com;gmail.com;gmail.com", "email": "gmail.com;gmail.com;gmail.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/shih21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Amazon", "aff_unique_dep": "Amazon.com, Inc.", "aff_unique_url": "https://www.amazon.com", "aff_unique_abbr": "Amazon", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "GBHT: Gradient Boosting Histogram Transform for Density Estimation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8481", "id": "8481", "proceeding": "http://proceedings.mlr.press/v139/cui21c.html", "slides": "/media/icml-2021/Slides/8481.pdf", "author_site": "Jingyi Cui, Hanyuan Hang, Yisen Wang, Zhouchen Lin", "author": "Jingyi Cui; Hanyuan Hang; Yisen Wang; Zhouchen Lin", "abstract": "In this paper, we propose a density estimation algorithm called \\textit{Gradient Boosting Histogram Transform} (GBHT), where we adopt the \\textit{Negative Log Likelihood} as the loss function to make the boosting procedure available for the unsupervised tasks. From a learning theory viewpoint, we first prove fast convergence rates for GBHT with the smoothness assumption that the underlying density function lies in the space $C^{0,\\alpha}$. Then when the target density function lies in spaces $C^{1,\\alpha}$, we present an upper bound for GBHT which is smaller than the lower bound of its corresponding base learner, in the sense of convergence rates. To the best of our knowledge, we make the first attempt to theoretically explain why boosting can enhance the performance of its base learners for density estimation problems. In experiments, we not only conduct performance comparisons with the widely used KDE, but also apply GBHT to anomaly detection to showcase a further application of GBHT.", "bibtex": "@InProceedings{pmlr-v139-cui21c,\n title = \t {GBHT: Gradient Boosting Histogram Transform for Density Estimation},\n author = {Cui, Jingyi and Hang, Hanyuan and Wang, Yisen and Lin, Zhouchen},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2233--2243},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cui21c/cui21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/cui21c.html},\n abstract = \t {In this paper, we propose a density estimation algorithm called \\textit{Gradient Boosting Histogram Transform} (GBHT), where we adopt the \\textit{Negative Log Likelihood} as the loss function to make the boosting procedure available for the unsupervised tasks. From a learning theory viewpoint, we first prove fast convergence rates for GBHT with the smoothness assumption that the underlying density function lies in the space $C^{0,\\alpha}$. 
Then when the target density function lies in spaces $C^{1,\\alpha}$, we present an upper bound for GBHT which is smaller than the lower bound of its corresponding base learner, in the sense of convergence rates. To the best of our knowledge, we make the first attempt to theoretically explain why boosting can enhance the performance of its base learners for density estimation problems. In experiments, we not only conduct performance comparisons with the widely used KDE, but also apply GBHT to anomaly detection to showcase a further application of GBHT.}\n}", "pdf": "http://proceedings.mlr.press/v139/cui21c/cui21c.pdf", "supp": "", "pdf_size": 1102068, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12614114366715223664&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Key Lab. of Machine Perception (MoE), School of EECS, Peking University, China; Department of Applied Mathematics, University of Twente, The Netherlands; Key Lab. of Machine Perception (MoE), School of EECS, Peking University, China + Pazhou Lab, Guangzhou, China; Key Lab. of Machine Perception (MoE), School of EECS, Peking University, China + Pazhou Lab, Guangzhou, China", "aff_domain": "pku.edu.cn; ;pku.edu.cn;pku.edu.cn", "email": "pku.edu.cn; ;pku.edu.cn;pku.edu.cn", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/cui21c.html", "aff_unique_index": "0;1;0+2;0+2", "aff_unique_norm": "Peking University;University of Twente;Pazhou Lab", "aff_unique_dep": "School of EECS;Department of Applied Mathematics;", "aff_unique_url": "http://www.pku.edu.cn;https://www.utwente.nl;", "aff_unique_abbr": "Peking U;;", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Guangzhou", "aff_country_unique_index": "0;1;0+0;0+0", "aff_country_unique": "China;Netherlands" }, { "title": "GLSearch: Maximum Common Subgraph Detection via Learning to Search", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10447", "id": "10447", "proceeding": "http://proceedings.mlr.press/v139/bai21e.html", "slides": "/media/icml-2021/Slides/10447.pdf", "author_site": "Yunsheng Bai, Derek Xu, Yizhou Sun, Wei Wang", "author": "Yunsheng Bai; Derek Xu; Yizhou Sun; Wei Wang", "abstract": "Detecting the Maximum Common Subgraph (MCS) between two input graphs is fundamental for applications in drug synthesis, malware detection, cloud computing, etc. However, MCS computation is NP-hard, and state-of-the-art MCS solvers rely on heuristic search algorithms which in practice cannot find good solution for large graph pairs given a limited computation budget. We propose GLSearch, a Graph Neural Network (GNN) based learning to search model. Our model is built upon the branch and bound algorithm, which selects one pair of nodes from the two input graphs to expand at a time. We propose a novel GNN-based Deep Q-Network (DQN) to select the node pair, making the search process much faster. Experiments on synthetic and real-world graph pairs demonstrate that our model learns a search strategy that is able to detect significantly larger common subgraphs than existing MCS solvers given the same computation budget. 
GLSearch can be potentially extended to solve many other combinatorial problems with constraints on graphs.", "bibtex": "@InProceedings{pmlr-v139-bai21e,\n title = \t {GLSearch: Maximum Common Subgraph Detection via Learning to Search},\n author = {Bai, Yunsheng and Xu, Derek and Sun, Yizhou and Wang, Wei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {588--598},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bai21e/bai21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/bai21e.html},\n abstract = \t {Detecting the Maximum Common Subgraph (MCS) between two input graphs is fundamental for applications in drug synthesis, malware detection, cloud computing, etc. However, MCS computation is NP-hard, and state-of-the-art MCS solvers rely on heuristic search algorithms which in practice cannot find good solution for large graph pairs given a limited computation budget. We propose GLSearch, a Graph Neural Network (GNN) based learning to search model. Our model is built upon the branch and bound algorithm, which selects one pair of nodes from the two input graphs to expand at a time. We propose a novel GNN-based Deep Q-Network (DQN) to select the node pair, making the search process much faster. Experiments on synthetic and real-world graph pairs demonstrate that our model learns a search strategy that is able to detect significantly larger common subgraphs than existing MCS solvers given the same computation budget. GLSearch can be potentially extended to solve many other combinatorial problems with constraints on graphs.}\n}", "pdf": "http://proceedings.mlr.press/v139/bai21e/bai21e.pdf", "supp": "", "pdf_size": 675917, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4905588803454281209&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, University of California, Los Angeles, California, USA; Department of Computer Science, University of California, Los Angeles, California, USA; Department of Computer Science, University of California, Los Angeles, California, USA; Department of Computer Science, University of California, Los Angeles, California, USA", "aff_domain": "ucla.edu;ucla.edu; ; ", "email": "ucla.edu;ucla.edu; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/bai21e.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of California, Los Angeles", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ucla.edu", "aff_unique_abbr": "UCLA", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Los Angeles", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "GMAC: A Distributional Perspective on Actor-Critic Framework", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8971", "id": "8971", "proceeding": "http://proceedings.mlr.press/v139/nam21a.html", "slides": "/media/icml-2021/Slides/8971.pdf", "author_site": "Daniel Nam, Younghoon Kim, Chan Youn Park", "author": "Daniel W Nam; Younghoon Kim; Chan Y Park", "abstract": "In this paper, we devise a distributional framework on actor-critic as a solution to distributional instability, action type restriction, and conflation between samples and statistics. 
We propose a new method that minimizes the Cram{\u00e9}r distance with the multi-step Bellman target distribution generated from a novel Sample-Replacement algorithm denoted SR(\\lambda), which learns the correct value distribution under multiple Bellman operations. Parameterizing a value distribution with Gaussian Mixture Model further improves the efficiency and the performance of the method, which we name GMAC. We empirically show that GMAC captures the correct representation of value distributions and improves the performance of a conventional actor-critic method with low computational cost, in both discrete and continuous action spaces using Arcade Learning Environment (ALE) and PyBullet environment.", "bibtex": "@InProceedings{pmlr-v139-nam21a,\n title = \t {GMAC: A Distributional Perspective on Actor-Critic Framework},\n author = {Nam, Daniel W and Kim, Younghoon and Park, Chan Y},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7927--7936},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nam21a/nam21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/nam21a.html},\n abstract = \t {In this paper, we devise a distributional framework on actor-critic as a solution to distributional instability, action type restriction, and conflation between samples and statistics. We propose a new method that minimizes the Cram{\u00e9}r distance with the multi-step Bellman target distribution generated from a novel Sample-Replacement algorithm denoted SR(\\lambda), which learns the correct value distribution under multiple Bellman operations. Parameterizing a value distribution with Gaussian Mixture Model further improves the efficiency and the performance of the method, which we name GMAC. We empirically show that GMAC captures the correct representation of value distributions and improves the performance of a conventional actor-critic method with low computational cost, in both discrete and continuous action spaces using Arcade Learning Environment (ALE) and PyBullet environment.}\n}", "pdf": "http://proceedings.mlr.press/v139/nam21a/nam21a.pdf", "supp": "", "pdf_size": 0, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16714448644175572802&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/nam21a.html" }, { "title": "GNNAutoScale: Scalable and Expressive Graph Neural Networks via Historical Embeddings", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10757", "id": "10757", "proceeding": "http://proceedings.mlr.press/v139/fey21a.html", "slides": "/media/icml-2021/Slides/10757.pdf", "author_site": "Matthias Fey, Jan Eric Lenssen, Frank Weichert, Jure Leskovec", "author": "Matthias Fey; Jan E. Lenssen; Frank Weichert; Jure Leskovec", "abstract": "We present GNNAutoScale (GAS), a framework for scaling arbitrary message-passing GNNs to large graphs. GAS prunes entire sub-trees of the computation graph by utilizing historical embeddings from prior training iterations, leading to constant GPU memory consumption in respect to input node size without dropping any data. 
While existing solutions weaken the expressive power of message passing due to sub-sampling of edges or non-trainable propagations, our approach is provably able to maintain the expressive power of the original GNN. We achieve this by providing approximation error bounds of historical embeddings and show how to tighten them in practice. Empirically, we show that the practical realization of our framework, PyGAS, an easy-to-use extension for PyTorch Geometric, is both fast and memory-efficient, learns expressive node representations, closely resembles the performance of their non-scaling counterparts, and reaches state-of-the-art performance on large-scale graphs.", "bibtex": "@InProceedings{pmlr-v139-fey21a,\n title = \t {GNNAutoScale: Scalable and Expressive Graph Neural Networks via Historical Embeddings},\n author = {Fey, Matthias and Lenssen, Jan E. and Weichert, Frank and Leskovec, Jure},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3294--3304},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fey21a/fey21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/fey21a.html},\n abstract = \t {We present GNNAutoScale (GAS), a framework for scaling arbitrary message-passing GNNs to large graphs. GAS prunes entire sub-trees of the computation graph by utilizing historical embeddings from prior training iterations, leading to constant GPU memory consumption in respect to input node size without dropping any data. While existing solutions weaken the expressive power of message passing due to sub-sampling of edges or non-trainable propagations, our approach is provably able to maintain the expressive power of the original GNN. We achieve this by providing approximation error bounds of historical embeddings and show how to tighten them in practice. 
Empirically, we show that the practical realization of our framework, PyGAS, an easy-to-use extension for PyTorch Geometric, is both fast and memory-efficient, learns expressive node representations, closely resembles the performance of their non-scaling counterparts, and reaches state-of-the-art performance on large-scale graphs.}\n}", "pdf": "http://proceedings.mlr.press/v139/fey21a/fey21a.pdf", "supp": "", "pdf_size": 379586, "gs_citation": 201, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4526974256428451675&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science, TU Dortmund University; Department of Computer Science, TU Dortmund University; Department of Computer Science, TU Dortmund University; Department of Computer Science, Stanford University", "aff_domain": "udo.edu; ; ; ", "email": "udo.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/fey21a.html", "aff_unique_index": "0;0;0;1", "aff_unique_norm": "TU Dortmund University;Stanford University", "aff_unique_dep": "Department of Computer Science;Department of Computer Science", "aff_unique_url": "https://www.tu-dortmund.de;https://www.stanford.edu", "aff_unique_abbr": "TU Dortmund;Stanford", "aff_campus_unique_index": "0;0;0;1", "aff_campus_unique": "Dortmund;Stanford", "aff_country_unique_index": "0;0;0;1", "aff_country_unique": "Germany;United States" }, { "title": "GP-Tree: A Gaussian Process Classifier for Few-Shot Incremental Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9129", "id": "9129", "proceeding": "http://proceedings.mlr.press/v139/achituve21a.html", "slides": "/media/icml-2021/Slides/9129.pdf", "author_site": "Idan Achituve, Aviv Navon, Yochai Yemini, Gal Chechik, Ethan Fetaya", "author": "Idan Achituve; Aviv Navon; Yochai Yemini; Gal Chechik; Ethan Fetaya", "abstract": "Gaussian processes (GPs) are non-parametric, flexible, models that work well in many tasks. Combining GPs with deep learning methods via deep kernel learning (DKL) is especially compelling due to the strong representational power induced by the network. However, inference in GPs, whether with or without DKL, can be computationally challenging on large datasets. Here, we propose GP-Tree, a novel method for multi-class classification with Gaussian processes and DKL. We develop a tree-based hierarchical model in which each internal node of the tree fits a GP to the data using the P{\u00f3}lya-Gamma augmentation scheme. As a result, our method scales well with both the number of classes and data size. 
We demonstrate the effectiveness of our method against other Gaussian process training baselines, and we show how our general GP approach achieves improved accuracy on standard incremental few-shot learning benchmarks.", "bibtex": "@InProceedings{pmlr-v139-achituve21a,\n title = \t {GP-Tree: A Gaussian Process Classifier for Few-Shot Incremental Learning},\n author = {Achituve, Idan and Navon, Aviv and Yemini, Yochai and Chechik, Gal and Fetaya, Ethan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {54--65},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/achituve21a/achituve21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/achituve21a.html},\n abstract = \t {Gaussian processes (GPs) are non-parametric, flexible, models that work well in many tasks. Combining GPs with deep learning methods via deep kernel learning (DKL) is especially compelling due to the strong representational power induced by the network. However, inference in GPs, whether with or without DKL, can be computationally challenging on large datasets. Here, we propose GP-Tree, a novel method for multi-class classification with Gaussian processes and DKL. We develop a tree-based hierarchical model in which each internal node of the tree fits a GP to the data using the P{\u00f3}lya-Gamma augmentation scheme. As a result, our method scales well with both the number of classes and data size. We demonstrate the effectiveness of our method against other Gaussian process training baselines, and we show how our general GP approach achieves improved accuracy on standard incremental few-shot learning benchmarks.}\n}", "pdf": "http://proceedings.mlr.press/v139/achituve21a/achituve21a.pdf", "supp": "", "pdf_size": 2584782, "gs_citation": 46, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3252666331118779321&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Bar-Ilan University, Israel; Bar-Ilan University, Israel; Bar-Ilan University, Israel; Bar-Ilan University, Israel + Nvidia, Israel; Bar-Ilan University, Israel", "aff_domain": "biu.ac.il; ; ; ; ", "email": "biu.ac.il; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/achituve21a.html", "aff_unique_index": "0;0;0;0+1;0", "aff_unique_norm": "Bar-Ilan University;NVIDIA", "aff_unique_dep": ";Nvidia", "aff_unique_url": "https://www.biu.ac.il;https://www.nvidia.com", "aff_unique_abbr": "BIU;NVDA", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0+0;0", "aff_country_unique": "Israel" }, { "title": "GRAD-MATCH: Gradient Matching based Data Subset Selection for Efficient Deep Model Training", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9861", "id": "9861", "proceeding": "http://proceedings.mlr.press/v139/killamsetty21a.html", "slides": "/media/icml-2021/Slides/9861.pdf", "author_site": "Krishnateja Killamsetty, Durga S, Ganesh Ramakrishnan, Abir De, Rishabh Iyer", "author": "Krishnateja Killamsetty; Durga S; Ganesh Ramakrishnan; Abir De; Rishabh Iyer", "abstract": "The great success of modern machine learning models on large datasets is contingent on extensive computational resources with high financial and environmental costs. 
One way to address this is by extracting subsets that generalize on par with the full data. In this work, we propose a general framework, GRAD-MATCH, which finds subsets that closely match the gradient of the \\emph{training or validation} set. We find such subsets effectively using an orthogonal matching pursuit algorithm. We show rigorous theoretical and convergence guarantees of the proposed algorithm and, through our extensive experiments on real-world datasets, show the effectiveness of our proposed framework. We show that GRAD-MATCH significantly and consistently outperforms several recent data-selection algorithms and achieves the best accuracy-efficiency trade-off. GRAD-MATCH is available as a part of the CORDS toolkit: \\url{https://github.com/decile-team/cords}.", "bibtex": "@InProceedings{pmlr-v139-killamsetty21a,\n title = \t {GRAD-MATCH: Gradient Matching based Data Subset Selection for Efficient Deep Model Training},\n author = {Killamsetty, Krishnateja and S, Durga and Ramakrishnan, Ganesh and De, Abir and Iyer, Rishabh},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5464--5474},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/killamsetty21a/killamsetty21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/killamsetty21a.html},\n abstract = \t {The great success of modern machine learning models on large datasets is contingent on extensive computational resources with high financial and environmental costs. One way to address this is by extracting subsets that generalize on par with the full data. In this work, we propose a general framework, GRAD-MATCH, which finds subsets that closely match the gradient of the \\emph{training or validation} set. We find such subsets effectively using an orthogonal matching pursuit algorithm. We show rigorous theoretical and convergence guarantees of the proposed algorithm and, through our extensive experiments on real-world datasets, show the effectiveness of our proposed framework. We show that GRAD-MATCH significantly and consistently outperforms several recent data-selection algorithms and achieves the best accuracy-efficiency trade-off. 
GRAD-MATCH is available as a part of the CORDS toolkit: \\url{https://github.com/decile-team/cords}.}\n}", "pdf": "http://proceedings.mlr.press/v139/killamsetty21a/killamsetty21a.pdf", "supp": "", "pdf_size": 918725, "gs_citation": 267, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8588416693456815954&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, University of Texas at Dallas, Dallas, USA+Department of Computer Science and Engineering, Indian Institute of Technology, Bombay, India; Department of Computer Science and Engineering, Indian Institute of Technology, Bombay, India; Department of Computer Science and Engineering, Indian Institute of Technology, Bombay, India; Department of Computer Science and Engineering, Indian Institute of Technology, Bombay, India; Department of Computer Science, University of Texas at Dallas, Dallas, USA", "aff_domain": "utdallas.edu; ; ; ; ", "email": "utdallas.edu; ; ; ; ", "github": "https://github.com/decile-team/cords", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/killamsetty21a.html", "aff_unique_index": "0+1;1;1;1;0", "aff_unique_norm": "University of Texas at Dallas;Indian Institute of Technology Bombay", "aff_unique_dep": "Department of Computer Science;Department of Computer Science and Engineering", "aff_unique_url": "https://www.utdallas.edu;https://www.iitb.ac.in", "aff_unique_abbr": "UT Dallas;IIT Bombay", "aff_campus_unique_index": "0+1;1;1;1;0", "aff_campus_unique": "Dallas;Bombay", "aff_country_unique_index": "0+1;1;1;1;0", "aff_country_unique": "United States;India" }, { "title": "GRAND: Graph Neural Diffusion", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8889", "id": "8889", "proceeding": "http://proceedings.mlr.press/v139/chamberlain21a.html", "slides": "/media/icml-2021/Slides/8889.pdf", "author_site": "Ben Chamberlain, James Rowbottom, Maria Gorinova, Michael Bronstein, Stefan Webb, Emanuele Rossi", "author": "Ben Chamberlain; James Rowbottom; Maria I Gorinova; Michael Bronstein; Stefan Webb; Emanuele Rossi", "abstract": "We present Graph Neural Diffusion (GRAND) that approaches deep learning on graphs as a continuous diffusion process and treats Graph Neural Networks (GNNs) as discretisations of an underlying PDE. In our model, the layer structure and topology correspond to the discretisation choices of temporal and spatial operators. Our approach allows a principled development of a broad new class of GNNs that are able to address the common plights of graph learning models such as depth, oversmoothing, and bottlenecks. Key to the success of our models are stability with respect to perturbations in the data and this is addressed for both implicit and explicit discretisation schemes. 
We develop linear and nonlinear versions of GRAND, which achieve competitive results on many standard graph benchmarks.", "bibtex": "@InProceedings{pmlr-v139-chamberlain21a,\n title = \t {GRAND: Graph Neural Diffusion},\n author = {Chamberlain, Ben and Rowbottom, James and Gorinova, Maria I and Bronstein, Michael and Webb, Stefan and Rossi, Emanuele},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1407--1418},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chamberlain21a/chamberlain21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/chamberlain21a.html},\n abstract = \t {We present Graph Neural Diffusion (GRAND) that approaches deep learning on graphs as a continuous diffusion process and treats Graph Neural Networks (GNNs) as discretisations of an underlying PDE. In our model, the layer structure and topology correspond to the discretisation choices of temporal and spatial operators. Our approach allows a principled development of a broad new class of GNNs that are able to address the common plights of graph learning models such as depth, oversmoothing, and bottlenecks. Key to the success of our models are stability with respect to perturbations in the data and this is addressed for both implicit and explicit discretisation schemes. We develop linear and nonlinear versions of GRAND, which achieve competitive results on many standard graph benchmarks.}\n}", "pdf": "http://proceedings.mlr.press/v139/chamberlain21a/chamberlain21a.pdf", "supp": "", "pdf_size": 1544518, "gs_citation": 352, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6075394870168508131&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Twitter Inc., London, UK; Twitter Inc., London, UK; Twitter Inc., London, UK; Twitter Inc., London, UK; Twitter Inc., London, UK; Twitter Inc., London, UK + Imperial College London, UK + IDSIA/USI, Switzerland", "aff_domain": "twitter.com; ; ; ; ; ", "email": "twitter.com; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/chamberlain21a.html", "aff_unique_index": "0;0;0;0;0;0+1+2", "aff_unique_norm": "Twitter Inc.;Imperial College London;IDSIA", "aff_unique_dep": ";;", "aff_unique_url": "https://twitter.com;https://www.imperial.ac.uk;https://www.idsia.ch", "aff_unique_abbr": "Twitter;ICL;IDSIA", "aff_campus_unique_index": "0;0;0;0;0;0", "aff_campus_unique": "London;", "aff_country_unique_index": "0;0;0;0;0;0+0+1", "aff_country_unique": "United Kingdom;Switzerland" }, { "title": "Gaussian Process-Based Real-Time Learning for Safety Critical Applications", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10619", "id": "10619", "proceeding": "http://proceedings.mlr.press/v139/lederer21a.html", "slides": "", "author_site": "Armin Lederer, Alejandro Ord\u00f3\u00f1ez Conejo, Korbinian Maier, Wenxin Xiao, Jonas Umlauft, Sandra Hirche", "author": "Armin Lederer; Alejandro J Ord\u00f3\u00f1ez Conejo; Korbinian A Maier; Wenxin Xiao; Jonas Umlauft; Sandra Hirche", "abstract": "The safe operation of physical systems typically relies on high-quality models. 
Since a continuous stream of data is generated during run-time, such models are often obtained through the application of Gaussian process regression because it provides guarantees on the prediction error. Due to its high computational complexity, Gaussian process regression must be used offline on batches of data, which prevents applications, where a fast adaptation through online learning is necessary to ensure safety. In order to overcome this issue, we propose the LoG-GP. It achieves a logarithmic update and prediction complexity in the number of training points through the aggregation of locally active Gaussian process models. Under weak assumptions on the aggregation scheme, it inherits safety guarantees from exact Gaussian process regression. These theoretical advantages are exemplarily exploited in the design of a safe and data-efficient, online-learning control policy. The efficiency and performance of the proposed real-time learning approach is demonstrated in a comparison to state-of-the-art methods.", "bibtex": "@InProceedings{pmlr-v139-lederer21a,\n title = \t {Gaussian Process-Based Real-Time Learning for Safety Critical Applications},\n author = {Lederer, Armin and Conejo, Alejandro J Ord{\\'o}{\\~n}ez and Maier, Korbinian A and Xiao, Wenxin and Umlauft, Jonas and Hirche, Sandra},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6055--6064},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lederer21a/lederer21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lederer21a.html},\n abstract = \t {The safe operation of physical systems typically relies on high-quality models. Since a continuous stream of data is generated during run-time, such models are often obtained through the application of Gaussian process regression because it provides guarantees on the prediction error. Due to its high computational complexity, Gaussian process regression must be used offline on batches of data, which prevents applications, where a fast adaptation through online learning is necessary to ensure safety. In order to overcome this issue, we propose the LoG-GP. It achieves a logarithmic update and prediction complexity in the number of training points through the aggregation of locally active Gaussian process models. Under weak assumptions on the aggregation scheme, it inherits safety guarantees from exact Gaussian process regression. These theoretical advantages are exemplarily exploited in the design of a safe and data-efficient, online-learning control policy. 
The efficiency and performance of the proposed real-time learning approach is demonstrated in a comparison to state-of-the-art methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/lederer21a/lederer21a.pdf", "supp": "", "pdf_size": 522172, "gs_citation": 50, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14549799860747120338&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Electrical and Computer Engineering, Technical University of Munich, Munich, Germany+1; Tecnol\u00f3gico de Costa Rica, Cartago, Costa Rica+2; Department of Electrical and Computer Engineering, Technical University of Munich, Munich, Germany+1; Department of Computer Science and Technology, Peking University, Beijing, China+3; Department of Electrical and Computer Engineering, Technical University of Munich, Munich, Germany+1; Department of Electrical and Computer Engineering, Technical University of Munich, Munich, Germany+1", "aff_domain": "tum.de; ; ; ; ; ", "email": "tum.de; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/lederer21a.html", "aff_unique_index": "0;2;0;3;0;0", "aff_unique_norm": "Technical University of Munich;;Tecnol\u00f3gico de Costa Rica;Peking University", "aff_unique_dep": "Department of Electrical and Computer Engineering;;;Department of Computer Science and Technology", "aff_unique_url": "https://www.tum.de;;https://www.tec.ac.cr;http://www.pku.edu.cn", "aff_unique_abbr": "TUM;;TEC;Peking U", "aff_campus_unique_index": "0;2;0;3;0;0", "aff_campus_unique": "Munich;;Cartago;Beijing", "aff_country_unique_index": "0;2;0;3;0;0", "aff_country_unique": "Germany;;Costa Rica;China" }, { "title": "Generalised Lipschitz Regularisation Equals Distributional Robustness", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10611", "id": "10611", "proceeding": "http://proceedings.mlr.press/v139/cranko21a.html", "slides": "/media/icml-2021/Slides/10611.pdf", "author_site": "Zac Cranko, Zhan Shi, Xinhua Zhang, Richard Nock, Simon Kornblith", "author": "Zac Cranko; Zhan Shi; Xinhua Zhang; Richard Nock; Simon Kornblith", "abstract": "The problem of adversarial examples has highlighted the need for a theory of regularisation that is general enough to apply to exotic function classes, such as universal approximators. In response, we have been able to significantly sharpen existing results regarding the relationship between distributional robustness and regularisation, when defined with a transportation cost uncertainty set. The theory allows us to characterise the conditions under which the distributional robustness equals a Lipschitz-regularised model, and to tightly quantify, for the first time, the slackness under very mild assumptions. As a theoretical application we show a new result explicating the connection between adversarial learning and distributional robustness. 
We then give new results for how to achieve Lipschitz regularisation of kernel classifiers, which are demonstrated experimentally.", "bibtex": "@InProceedings{pmlr-v139-cranko21a,\n title = \t {Generalised Lipschitz Regularisation Equals Distributional Robustness},\n author = {Cranko, Zac and Shi, Zhan and Zhang, Xinhua and Nock, Richard and Kornblith, Simon},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2178--2188},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cranko21a/cranko21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/cranko21a.html},\n abstract = \t {The problem of adversarial examples has highlighted the need for a theory of regularisation that is general enough to apply to exotic function classes, such as universal approximators. In response, we have been able to significantly sharpen existing results regarding the relationship between distributional robustness and regularisation, when defined with a transportation cost uncertainty set. The theory allows us to characterise the conditions under which the distributional robustness equals a Lipschitz-regularised model, and to tightly quantify, for the first time, the slackness under very mild assumptions. As a theoretical application we show a new result explicating the connection between adversarial learning and distributional robustness. We then give new results for how to achieve Lipschitz regularisation of kernel classifiers, which are demonstrated experimentally.}\n}", "pdf": "http://proceedings.mlr.press/v139/cranko21a/cranko21a.pdf", "supp": "", "pdf_size": 1201867, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17499833570681587535&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Universit\u00e4t T\u00fcbingen, T\u00fcbingen, Germany; University of Illinois at Chicago, IL, USA; University of Illinois at Chicago, IL, USA; Google Brain; Google Brain", "aff_domain": "uic.edu; ; ; ; ", "email": "uic.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/cranko21a.html", "aff_unique_index": "0;1;1;2;2", "aff_unique_norm": "Universit\u00e4t T\u00fcbingen;University of Illinois at Chicago;Google", "aff_unique_dep": ";;Google Brain", "aff_unique_url": "https://www.uni-tuebingen.de/;https://www.uic.edu;https://brain.google.com", "aff_unique_abbr": "Uni T\u00fcbingen;UIC;Google Brain", "aff_campus_unique_index": "0;1;1;2;2", "aff_campus_unique": "T\u00fcbingen;Chicago;Mountain View", "aff_country_unique_index": "0;1;1;1;1", "aff_country_unique": "Germany;United States" }, { "title": "Generalizable Episodic Memory for Deep Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9885", "id": "9885", "proceeding": "http://proceedings.mlr.press/v139/hu21d.html", "slides": "/media/icml-2021/Slides/9885.pdf", "author_site": "Hao Hu, Jianing Ye, Guangxiang Zhu, Zhizhou Ren, Chongjie Zhang", "author": "Hao Hu; Jianing Ye; Guangxiang Zhu; Zhizhou Ren; Chongjie Zhang", "abstract": "Episodic memory-based methods can rapidly latch onto past successful strategies by a non-parametric memory and improve sample efficiency of traditional reinforcement learning. 
However, little effort is put into the continuous domain, where a state is never visited twice, and previous episodic methods fail to efficiently aggregate experience across trajectories. To address this problem, we propose Generalizable Episodic Memory (GEM), which effectively organizes the state-action values of episodic memory in a generalizable manner and supports implicit planning on memorized trajectories. GEM utilizes a double estimator to reduce the overestimation bias induced by value propagation in the planning process. Empirical evaluation shows that our method significantly outperforms existing trajectory-based methods on various MuJoCo continuous control tasks. To further show the general applicability, we evaluate our method on Atari games with discrete action space, which also shows a significant improvement over baseline algorithms.", "bibtex": "@InProceedings{pmlr-v139-hu21d,\n title = \t {Generalizable Episodic Memory for Deep Reinforcement Learning},\n author = {Hu, Hao and Ye, Jianing and Zhu, Guangxiang and Ren, Zhizhou and Zhang, Chongjie},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4380--4390},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hu21d/hu21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/hu21d.html},\n abstract = \t {Episodic memory-based methods can rapidly latch onto past successful strategies by a non-parametric memory and improve sample efficiency of traditional reinforcement learning. However, little effort is put into the continuous domain, where a state is never visited twice, and previous episodic methods fail to efficiently aggregate experience across trajectories. To address this problem, we propose Generalizable Episodic Memory (GEM), which effectively organizes the state-action values of episodic memory in a generalizable manner and supports implicit planning on memorized trajectories. GEM utilizes a double estimator to reduce the overestimation bias induced by value propagation in the planning process. Empirical evaluation shows that our method significantly outperforms existing trajectory-based methods on various MuJoCo continuous control tasks. 
To further show the general applicability, we evaluate our method on Atari games with discrete action space, which also shows a significant improvement over baseline algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v139/hu21d/hu21d.pdf", "supp": "", "pdf_size": 9760719, "gs_citation": 47, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2172996156668096387&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "The Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China; Peking University, Beijing, China; The Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China; University of Illinois at Urbana-Champaign, IL, USA; The Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China", "aff_domain": "tsinghua.edu.cn;pku.edu.cn;tsinghua.edu.cn;illinois.edu;mail.tsinghua.edu.cn", "email": "tsinghua.edu.cn;pku.edu.cn;tsinghua.edu.cn;illinois.edu;mail.tsinghua.edu.cn", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/hu21d.html", "aff_unique_index": "0;1;0;2;0", "aff_unique_norm": "Tsinghua University;Peking University;University of Illinois Urbana-Champaign", "aff_unique_dep": "Institute for Interdisciplinary Information Sciences;;", "aff_unique_url": "https://www.tsinghua.edu.cn;http://www.pku.edu.cn;https://illinois.edu", "aff_unique_abbr": "Tsinghua;Peking U;UIUC", "aff_campus_unique_index": "0;0;0;1;0", "aff_campus_unique": "Beijing;Urbana-Champaign", "aff_country_unique_index": "0;0;0;1;0", "aff_country_unique": "China;United States" }, { "title": "Generalization Bounds in the Presence of Outliers: a Median-of-Means Study", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8843", "id": "8843", "proceeding": "http://proceedings.mlr.press/v139/laforgue21a.html", "slides": "/media/icml-2021/Slides/8843.pdf", "author_site": "Pierre Laforgue, Guillaume Staerman, Stephan Cl\u00e9men\u00e7on", "author": "Pierre Laforgue; Guillaume Staerman; Stephan Cl\u00e9men\u00e7on", "abstract": "In contrast to the empirical mean, the Median-of-Means (MoM) is an estimator of the mean $\\theta$ of a square integrable r.v. Z, around which accurate nonasymptotic confidence bounds can be built, even when Z does not exhibit a sub-Gaussian tail behavior. Thanks to the high confidence it achieves on heavy-tailed data, MoM has found various applications in machine learning, where it is used to design training procedures that are not sensitive to atypical observations. More recently, a new line of work is now trying to characterize and leverage MoM\u2019s ability to deal with corrupted data. In this context, the present work proposes a general study of MoM\u2019s concentration properties under the contamination regime, that provides a clear understanding on the impact of the outlier proportion and the number of blocks chosen. The analysis is extended to (multisample) U-statistics, i.e. averages over tuples of observations, that raise additional challenges due to the dependence induced. 
Finally, we show that the latter bounds can be used in a straightforward fashion to derive generalization guarantees for pairwise learning in a contaminated setting, and propose an algorithm to compute provably reliable decision functions.", "bibtex": "@InProceedings{pmlr-v139-laforgue21a,\n title = \t {Generalization Bounds in the Presence of Outliers: a Median-of-Means Study},\n author = {Laforgue, Pierre and Staerman, Guillaume and Cl{\\'e}men{\\c{c}}on, Stephan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5937--5947},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/laforgue21a/laforgue21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/laforgue21a.html},\n abstract = \t {In contrast to the empirical mean, the Median-of-Means (MoM) is an estimator of the mean $\\theta$ of a square integrable r.v. Z, around which accurate nonasymptotic confidence bounds can be built, even when Z does not exhibit a sub-Gaussian tail behavior. Thanks to the high confidence it achieves on heavy-tailed data, MoM has found various applications in machine learning, where it is used to design training procedures that are not sensitive to atypical observations. More recently, a new line of work is now trying to characterize and leverage MoM\u2019s ability to deal with corrupted data. In this context, the present work proposes a general study of MoM\u2019s concentration properties under the contamination regime, that provides a clear understanding on the impact of the outlier proportion and the number of blocks chosen. The analysis is extended to (multisample) U-statistics, i.e. averages over tuples of observations, that raise additional challenges due to the dependence induced. 
Finally, we show that the latter bounds can be used in a straightforward fashion to derive generalization guarantees for pairwise learning in a contaminated setting, and propose an algorithm to compute provably reliable decision functions.}\n}", "pdf": "http://proceedings.mlr.press/v139/laforgue21a/laforgue21a.pdf", "supp": "", "pdf_size": 5686287, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8199360461179165932&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Universit\u00e0 degli Studi di Milano, Italy; LTCI, T\u00e9l\u00e9com Paris, Institut Polytechnique de Paris, France; LTCI, T\u00e9l\u00e9com Paris, Institut Polytechnique de Paris, France", "aff_domain": "unimi.it; ; ", "email": "unimi.it; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/laforgue21a.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "Universit\u00e0 degli Studi di Milano;T\u00e9l\u00e9com Paris", "aff_unique_dep": ";LTCI", "aff_unique_url": "https://www.unimi.it;https://www.telecom-paris.fr", "aff_unique_abbr": "UniMi;T\u00e9l\u00e9com Paris", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;1", "aff_country_unique": "Italy;France" }, { "title": "Generalization Error Bound for Hyperbolic Ordinal Embedding", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9477", "id": "9477", "proceeding": "http://proceedings.mlr.press/v139/suzuki21a.html", "slides": "", "author_site": "Atsushi Suzuki, Atsushi Nitanda, Jing Wang, Linchuan Xu, Kenji Yamanishi, Marc Cavazza", "author": "Atsushi Suzuki; Atsushi Nitanda; Jing Wang; Linchuan Xu; Kenji Yamanishi; Marc Cavazza", "abstract": "Hyperbolic ordinal embedding (HOE) represents entities as points in hyperbolic space so that they agree as well as possible with given constraints in the form of entity $i$ is more similar to entity $j$ than to entity $k$. It has been experimentally shown that HOE can obtain representations of hierarchical data such as a knowledge base and a citation network effectively, owing to hyperbolic space\u2019s exponential growth property. However, its theoretical analysis has been limited to ideal noiseless settings, and its generalization error in compensation for hyperbolic space\u2019s exponential representation ability has not been guaranteed. The difficulty is that existing generalization error bound derivations for ordinal embedding based on the Gramian matrix are not applicable in HOE, since hyperbolic space is not inner-product space. In this paper, through our novel characterization of HOE with decomposed Lorentz Gramian matrices, we provide a generalization error bound of HOE for the first time, which is at most exponential with respect to the embedding space\u2019s radius. 
Our comparison between the bounds of HOE and Euclidean ordinal embedding shows that HOE\u2019s generalization error comes at a reasonable cost considering its exponential representation ability.", "bibtex": "@InProceedings{pmlr-v139-suzuki21a,\n title = \t {Generalization Error Bound for Hyperbolic Ordinal Embedding},\n author = {Suzuki, Atsushi and Nitanda, Atsushi and Wang, Jing and Xu, Linchuan and Yamanishi, Kenji and Cavazza, Marc},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10011--10021},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/suzuki21a/suzuki21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/suzuki21a.html},\n abstract = \t {Hyperbolic ordinal embedding (HOE) represents entities as points in hyperbolic space so that they agree as well as possible with given constraints in the form of entity $i$ is more similar to entity $j$ than to entity $k$. It has been experimentally shown that HOE can obtain representations of hierarchical data such as a knowledge base and a citation network effectively, owing to hyperbolic space\u2019s exponential growth property. However, its theoretical analysis has been limited to ideal noiseless settings, and its generalization error in compensation for hyperbolic space\u2019s exponential representation ability has not been guaranteed. The difficulty is that existing generalization error bound derivations for ordinal embedding based on the Gramian matrix are not applicable in HOE, since hyperbolic space is not inner-product space. In this paper, through our novel characterization of HOE with decomposed Lorentz Gramian matrices, we provide a generalization error bound of HOE for the first time, which is at most exponential with respect to the embedding space\u2019s radius. 
Our comparison between the bounds of HOE and Euclidean ordinal embedding shows that HOE\u2019s generalization error comes at a reasonable cost considering its exponential representation ability.}\n}", "pdf": "http://proceedings.mlr.press/v139/suzuki21a/suzuki21a.pdf", "supp": "", "pdf_size": 344879, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12978127666597559426&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "School of Computing and Mathematical Sciences, Faculty of Liberal Arts and Sciences, University of Greenwich, United Kingdom; Department of Artificial Intelligence, Faculty of Computer Science and Systems Engineering, Kyushu Institute of Technology, Japan; Department of Computing, The Hong Kong Polytechnic University, Hong Kong; Department of Computing, The Hong Kong Polytechnic University, Hong Kong; School of Computing and Mathematical Sciences, Faculty of Liberal Arts and Sciences, University of Greenwich, United Kingdom; Department of Mathematical Informatics, Graduate School of Information Science and Technology, The University of Tokyo, Japan", "aff_domain": "gmail.com; ;greenwich.ac.uk; ; ; ", "email": "gmail.com; ;greenwich.ac.uk; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/suzuki21a.html", "aff_unique_index": "0;1;2;2;0;3", "aff_unique_norm": "University of Greenwich;Kyushu Institute of Technology;Hong Kong Polytechnic University;University of Tokyo", "aff_unique_dep": "School of Computing and Mathematical Sciences;Department of Artificial Intelligence;Department of Computing;Department of Mathematical Informatics, Graduate School of Information Science and Technology", "aff_unique_url": "https://www2.gre.ac.uk;https://www.kyutech.ac.jp;https://www.polyu.edu.hk;https://www.u-tokyo.ac.jp", "aff_unique_abbr": "Greenwich;Kyutech;PolyU;UTokyo", "aff_campus_unique_index": "1;1;2", "aff_campus_unique": ";Hong Kong SAR;Tokyo", "aff_country_unique_index": "0;1;2;2;0;1", "aff_country_unique": "United Kingdom;Japan;China" }, { "title": "Generalization Guarantees for Neural Architecture Search with Train-Validation Split", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10005", "id": "10005", "proceeding": "http://proceedings.mlr.press/v139/oymak21a.html", "slides": "", "author_site": "Samet Oymak, Mingchen Li, Mahdi Soltanolkotabi", "author": "Samet Oymak; Mingchen Li; Mahdi Soltanolkotabi", "abstract": "Neural Architecture Search (NAS) is a popular method for automatically designing optimized deep-learning architectures. NAS methods commonly use bilevel optimization where one optimizes the weights over the training data (lower-level problem) and hyperparameters - such as the architecture - over the validation data (upper-level problem). This paper explores the statistical aspects of such problems with train-validation splits. In practice, the lower-level problem is often overparameterized and can easily achieve zero loss. Thus, a-priori, it seems impossible to distinguish the right hyperparameters based on training loss alone which motivates a better understanding of train-validation split. To this aim, we first show that refined properties of the validation loss such as risk and hyper-gradients are indicative of those of the true test loss and help prevent overfitting with a near-minimal validation sample size. Importantly, this is established for continuous search spaces which are relevant for differentiable search schemes. 
We then establish generalization bounds for NAS problems with an emphasis on an activation search problem and gradient-based methods. Finally, we show rigorous connections between NAS and low-rank matrix learning which leads to algorithmic insights where the solution of the upper problem can be accurately learned via spectral methods to achieve near-minimal risk.", "bibtex": "@InProceedings{pmlr-v139-oymak21a,\n title = \t {Generalization Guarantees for Neural Architecture Search with Train-Validation Split},\n author = {Oymak, Samet and Li, Mingchen and Soltanolkotabi, Mahdi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8291--8301},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/oymak21a/oymak21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/oymak21a.html},\n abstract = \t {Neural Architecture Search (NAS) is a popular method for automatically designing optimized deep-learning architectures. NAS methods commonly use bilevel optimization where one optimizes the weights over the training data (lower-level problem) and hyperparameters - such as the architecture - over the validation data (upper-level problem). This paper explores the statistical aspects of such problems with train-validation splits. In practice, the lower-level problem is often overparameterized and can easily achieve zero loss. Thus, a-priori, it seems impossible to distinguish the right hyperparameters based on training loss alone which motivates a better understanding of train-validation split. To this aim, we first show that refined properties of the validation loss such as risk and hyper-gradients are indicative of those of the true test loss and help prevent overfitting with a near-minimal validation sample size. Importantly, this is established for continuous search spaces which are relevant for differentiable search schemes. We then establish generalization bounds for NAS problems with an emphasis on an activation search problem and gradient-based methods. 
Finally, we show rigorous connections between NAS and low-rank matrix learning which leads to algorithmic insights where the solution of the upper problem can be accurately learned via spectral methods to achieve near-minimal risk.}\n}", "pdf": "http://proceedings.mlr.press/v139/oymak21a/oymak21a.pdf", "supp": "", "pdf_size": 2260122, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3948053856982657624&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Electrical and Computer Eng., University of California, Riverside; Department of Computer Science and Eng., University of California, Riverside; Ming Hsieh Department of Electrical Eng., University of Southern California", "aff_domain": "ucr.edu;ucr.edu;usc.edu", "email": "ucr.edu;ucr.edu;usc.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/oymak21a.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of California, Riverside;University of Southern California", "aff_unique_dep": "Department of Electrical and Computer Engineering;Ming Hsieh Department of Electrical Engineering", "aff_unique_url": "https://www.ucr.edu;https://www.usc.edu", "aff_unique_abbr": "UCR;USC", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Riverside;Los Angeles", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Generalized Doubly Reparameterized Gradient Estimators", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10709", "id": "10709", "proceeding": "http://proceedings.mlr.press/v139/bauer21a.html", "slides": "", "author_site": "Matthias Bauer, Andriy Mnih", "author": "Matthias Bauer; Andriy Mnih", "abstract": "Efficient low-variance gradient estimation enabled by the reparameterization trick (RT) has been essential to the success of variational autoencoders. Doubly-reparameterized gradients (DReGs) improve on the RT for multi-sample variational bounds by applying reparameterization a second time for an additional reduction in variance. Here, we develop two generalizations of the DReGs estimator and show that they can be used to train conditional and hierarchical VAEs on image modelling tasks more effectively. We first extend the estimator to hierarchical models with several stochastic layers by showing how to treat additional score function terms due to the hierarchical variational posterior. We then generalize DReGs to score functions of arbitrary distributions instead of just those of the sampling distribution, which makes the estimator applicable to the parameters of the prior in addition to those of the posterior.", "bibtex": "@InProceedings{pmlr-v139-bauer21a,\n title = \t {Generalized Doubly Reparameterized Gradient Estimators},\n author = {Bauer, Matthias and Mnih, Andriy},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {738--747},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bauer21a/bauer21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bauer21a.html},\n abstract = \t {Efficient low-variance gradient estimation enabled by the reparameterization trick (RT) has been essential to the success of variational autoencoders. 
Doubly-reparameterized gradients (DReGs) improve on the RT for multi-sample variational bounds by applying reparameterization a second time for an additional reduction in variance. Here, we develop two generalizations of the DReGs estimator and show that they can be used to train conditional and hierarchical VAEs on image modelling tasks more effectively. We first extend the estimator to hierarchical models with several stochastic layers by showing how to treat additional score function terms due to the hierarchical variational posterior. We then generalize DReGs to score functions of arbitrary distributions instead of just those of the sampling distribution, which makes the estimator applicable to the parameters of the prior in addition to those of the posterior.}\n}", "pdf": "http://proceedings.mlr.press/v139/bauer21a/bauer21a.pdf", "supp": "", "pdf_size": 642310, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6842204331739990334&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "DeepMind, London, UK; DeepMind, London, UK", "aff_domain": "deepmind.com;deepmind.com", "email": "deepmind.com;deepmind.com", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/bauer21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "title": "Generating images with sparse representations", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8791", "id": "8791", "proceeding": "http://proceedings.mlr.press/v139/nash21a.html", "slides": "/media/icml-2021/Slides/8791.pdf", "author_site": "Charlie Nash, Jacob Menick, Sander Dieleman, Peter Battaglia", "author": "Charlie Nash; Jacob Menick; Sander Dieleman; Peter Battaglia", "abstract": "The high dimensionality of images presents architecture and sampling-efficiency challenges for likelihood-based generative models. Previous approaches such as VQ-VAE use deep autoencoders to obtain compact representations, which are more practical as inputs for likelihood-based models. We present an alternative approach, inspired by common image compression methods like JPEG, and convert images to quantized discrete cosine transform (DCT) blocks, which are represented sparsely as a sequence of DCT channel, spatial location, and DCT coefficient triples. We propose a Transformer-based autoregressive architecture, which is trained to sequentially predict the conditional distribution of the next element in such sequences, and which scales effectively to high resolution images. On a range of image datasets, we demonstrate that our approach can generate high quality, diverse images, with sample metric scores competitive with state of the art methods. 
We additionally show that simple modifications to our method yield effective image colorization and super-resolution models.", "bibtex": "@InProceedings{pmlr-v139-nash21a,\n title = \t {Generating images with sparse representations},\n author = {Nash, Charlie and Menick, Jacob and Dieleman, Sander and Battaglia, Peter},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7958--7968},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nash21a/nash21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/nash21a.html},\n abstract = \t {The high dimensionality of images presents architecture and sampling-efficiency challenges for likelihood-based generative models. Previous approaches such as VQ-VAE use deep autoencoders to obtain compact representations, which are more practical as inputs for likelihood-based models. We present an alternative approach, inspired by common image compression methods like JPEG, and convert images to quantized discrete cosine transform (DCT) blocks, which are represented sparsely as a sequence of DCT channel, spatial location, and DCT coefficient triples. We propose a Transformer-based autoregressive architecture, which is trained to sequentially predict the conditional distribution of the next element in such sequences, and which scales effectively to high resolution images. On a range of image datasets, we demonstrate that our approach can generate high quality, diverse images, with sample metric scores competitive with state of the art methods. We additionally show that simple modifications to our method yield effective image colorization and super-resolution models.}\n}", "pdf": "http://proceedings.mlr.press/v139/nash21a/nash21a.pdf", "supp": "", "pdf_size": 7205733, "gs_citation": 219, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2348168845736317029&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom", "aff_domain": "google.com; ; ; ", "email": "google.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/nash21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Generative Adversarial Networks for Markovian Temporal Dynamics: Stochastic Continuous Data Generation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10659", "id": "10659", "proceeding": "http://proceedings.mlr.press/v139/park21d.html", "slides": "", "author_site": "Sung Woo Park, Dong Wook Shu, Junseok Kwon", "author": "Sung Woo Park; Dong Wook Shu; Junseok Kwon", "abstract": "In this paper, we present a novel generative adversarial network (GAN) that can describe Markovian temporal dynamics. To generate stochastic sequential data, we introduce a novel stochastic differential equation-based conditional generator and spatial-temporal constrained discriminator networks. 
To stabilize the learning dynamics of the min-max type of the GAN objective function, we propose well-posed constraint terms for both networks. We also propose a novel conditional Markov Wasserstein distance to induce a pathwise Wasserstein distance. The experimental results demonstrate that our method outperforms state-of-the-art methods using several different types of data.", "bibtex": "@InProceedings{pmlr-v139-park21d,\n title = \t {Generative Adversarial Networks for Markovian Temporal Dynamics: Stochastic Continuous Data Generation},\n author = {Park, Sung Woo and Shu, Dong Wook and Kwon, Junseok},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8413--8421},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/park21d/park21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/park21d.html},\n abstract = \t {In this paper, we present a novel generative adversarial network (GAN) that can describe Markovian temporal dynamics. To generate stochastic sequential data, we introduce a novel stochastic differential equation-based conditional generator and spatial-temporal constrained discriminator networks. To stabilize the learning dynamics of the min-max type of the GAN objective function, we propose well-posed constraint terms for both networks. We also propose a novel conditional Markov Wasserstein distance to induce a pathwise Wasserstein distance. The experimental results demonstrate that our method outperforms state-of-the-art methods using several different types of data.}\n}", "pdf": "http://proceedings.mlr.press/v139/park21d/park21d.pdf", "supp": "", "pdf_size": 3485898, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=741576039859285540&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "School of Computer Science and Engineering, Artificial Intelligence Graduate School, Chung-Ang University, Seoul, Korea; School of Computer Science and Engineering, Artificial Intelligence Graduate School, Chung-Ang University, Seoul, Korea; School of Computer Science and Engineering, Artificial Intelligence Graduate School, Chung-Ang University, Seoul, Korea", "aff_domain": "gmail.com;naver.com;cau.ac.kr", "email": "gmail.com;naver.com;cau.ac.kr", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/park21d.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Chung-Ang University", "aff_unique_dep": "School of Computer Science and Engineering", "aff_unique_url": "http://www.cau.ac.kr", "aff_unique_abbr": "CAU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Seoul", "aff_country_unique_index": "0;0;0", "aff_country_unique": "South Korea" }, { "title": "Generative Adversarial Transformers", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8997", "id": "8997", "proceeding": "http://proceedings.mlr.press/v139/hudson21a.html", "slides": "/media/icml-2021/Slides/8997.pdf", "author_site": "Drew A. Hudson, Larry Zitnick", "author": "Drew A Hudson; Larry Zitnick", "abstract": "We introduce the GANsformer, a novel and efficient type of transformer, and explore it for the task of visual generative modeling. 
The network employs a bipartite structure that enables long-range interactions across the image, while maintaining computation of linear efficiency, that can readily scale to high-resolution synthesis. It iteratively propagates information from a set of latent variables to the evolving visual features and vice versa, to support the refinement of each in light of the other and encourage the emergence of compositional representations of objects and scenes. In contrast to the classic transformer architecture, it utilizes multiplicative integration that allows flexible region-based modulation, and can thus be seen as a generalization of the successful StyleGAN network. We demonstrate the model\u2019s strength and robustness through a careful evaluation over a range of datasets, from simulated multi-object environments to rich real-world indoor and outdoor scenes, showing it achieves state-of-the-art results in terms of image quality and diversity, while enjoying fast learning and better data-efficiency. Further qualitative and quantitative experiments offer us an insight into the model\u2019s inner workings, revealing improved interpretability and stronger disentanglement, and illustrating the benefits and efficacy of our approach. An implementation of the model is available at https://github.com/dorarad/gansformer.", "bibtex": "@InProceedings{pmlr-v139-hudson21a,\n title = \t {Generative Adversarial Transformers},\n author = {Hudson, Drew A and Zitnick, Larry},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4487--4499},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hudson21a/hudson21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hudson21a.html},\n abstract = \t {We introduce the GANsformer, a novel and efficient type of transformer, and explore it for the task of visual generative modeling. The network employs a bipartite structure that enables long-range interactions across the image, while maintaining computation of linear efficiency, that can readily scale to high-resolution synthesis. It iteratively propagates information from a set of latent variables to the evolving visual features and vice versa, to support the refinement of each in light of the other and encourage the emergence of compositional representations of objects and scenes. In contrast to the classic transformer architecture, it utilizes multiplicative integration that allows flexible region-based modulation, and can thus be seen as a generalization of the successful StyleGAN network. We demonstrate the model\u2019s strength and robustness through a careful evaluation over a range of datasets, from simulated multi-object environments to rich real-world indoor and outdoor scenes, showing it achieves state-of-the-art results in terms of image quality and diversity, while enjoying fast learning and better data-efficiency. Further qualitative and quantitative experiments offer us an insight into the model\u2019s inner workings, revealing improved interpretability and stronger disentanglement, and illustrating the benefits and efficacy of our approach. 
An implementation of the model is available at https://github.com/dorarad/gansformer.}\n}", "pdf": "http://proceedings.mlr.press/v139/hudson21a/hudson21a.pdf", "supp": "", "pdf_size": 2674236, "gs_citation": 240, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2292407280859337870&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Computer Science Department, Stanford University, CA, USA; Facebook AI Research, CA, USA", "aff_domain": "cs.stanford.edu; ", "email": "cs.stanford.edu; ", "github": "https://github.com/dorarad/gansformer", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/hudson21a.html", "aff_unique_index": "0;1", "aff_unique_norm": "Stanford University;Meta", "aff_unique_dep": "Computer Science Department;AI Research", "aff_unique_url": "https://www.stanford.edu;https://research.facebook.com", "aff_unique_abbr": "Stanford;FAIR", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Stanford;CA", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Generative Causal Explanations for Graph Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8417", "id": "8417", "proceeding": "http://proceedings.mlr.press/v139/lin21d.html", "slides": "/media/icml-2021/Slides/8417.pdf", "author_site": "Wanyu Lin, Hao Lan, Baochun Li", "author": "Wanyu Lin; Hao Lan; Baochun Li", "abstract": "This paper presents {\\em Gem}, a model-agnostic approach for providing interpretable explanations for any GNNs on various graph learning tasks. Specifically, we formulate the problem of providing explanations for the decisions of GNNs as a causal learning task. Then we train a causal explanation model equipped with a loss function based on Granger causality. Different from existing explainers for GNNs, {\\em Gem} explains GNNs on graph-structured data from a causal perspective. It has better generalization ability as it has no requirements on the internal structure of the GNNs or prior knowledge on the graph learning tasks. In addition, {\\em Gem}, once trained, can be used to explain the target GNN very quickly. Our theoretical analysis shows that several recent explainers fall into a unified framework of {\\em additive feature attribution methods}. Experimental results on synthetic and real-world datasets show that {\\em Gem} achieves a relative increase of the explanation accuracy by up to $30%$ and speeds up the explanation process by up to $110\\times$ as compared to its state-of-the-art alternatives.", "bibtex": "@InProceedings{pmlr-v139-lin21d,\n title = \t {Generative Causal Explanations for Graph Neural Networks},\n author = {Lin, Wanyu and Lan, Hao and Li, Baochun},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6666--6679},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lin21d/lin21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/lin21d.html},\n abstract = \t {This paper presents {\\em Gem}, a model-agnostic approach for providing interpretable explanations for any GNNs on various graph learning tasks. Specifically, we formulate the problem of providing explanations for the decisions of GNNs as a causal learning task. 
Then we train a causal explanation model equipped with a loss function based on Granger causality. Different from existing explainers for GNNs, {\\em Gem} explains GNNs on graph-structured data from a causal perspective. It has better generalization ability as it has no requirements on the internal structure of the GNNs or prior knowledge on the graph learning tasks. In addition, {\\em Gem}, once trained, can be used to explain the target GNN very quickly. Our theoretical analysis shows that several recent explainers fall into a unified framework of {\\em additive feature attribution methods}. Experimental results on synthetic and real-world datasets show that {\\em Gem} achieves a relative increase of the explanation accuracy by up to $30%$ and speeds up the explanation process by up to $110\\times$ as compared to its state-of-the-art alternatives.}\n}", "pdf": "http://proceedings.mlr.press/v139/lin21d/lin21d.pdf", "supp": "", "pdf_size": 5823754, "gs_citation": 221, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12091471527396222072&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Computing, The Hong Kong Polytechnic University, Hong Kong, China+Department of Electrical & Computer Engineering, University of Toronto, Toronto, Canada; Department of Electrical & Computer Engineering, University of Toronto, Toronto, Canada; Department of Electrical & Computer Engineering, University of Toronto, Toronto, Canada", "aff_domain": "comp.polyu.edu.hk; ; ", "email": "comp.polyu.edu.hk; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/lin21d.html", "aff_unique_index": "0+1;1;1", "aff_unique_norm": "Hong Kong Polytechnic University;University of Toronto", "aff_unique_dep": "Department of Computing;Department of Electrical & Computer Engineering", "aff_unique_url": "https://www.polyu.edu.hk;https://www.utoronto.ca", "aff_unique_abbr": "PolyU;U of T", "aff_campus_unique_index": "0+1;1;1", "aff_campus_unique": "Hong Kong;Toronto", "aff_country_unique_index": "0+1;1;1", "aff_country_unique": "China;Canada" }, { "title": "Generative Particle Variational Inference via Estimation of Functional Gradients", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9249", "id": "9249", "proceeding": "http://proceedings.mlr.press/v139/ratzlaff21a.html", "slides": "/media/icml-2021/Slides/9249.pdf", "author_site": "Neale Ratzlaff, Jerry Bai, Fuxin Li, Wei Xu", "author": "Neale Ratzlaff; Qinxun Bai; Li Fuxin; Wei Xu", "abstract": "Recently, particle-based variational inference (ParVI) methods have gained interest because they can avoid arbitrary parametric assumptions that are common in variational inference. However, many ParVI approaches do not allow arbitrary sampling from the posterior, and the few that do allow such sampling suffer from suboptimality. This work proposes a new method for learning to approximately sample from the posterior distribution. We construct a neural sampler that is trained with the functional gradient of the KL-divergence between the empirical sampling distribution and the target distribution, assuming the gradient resides within a reproducing kernel Hilbert space. Our generative ParVI (GPVI) approach maintains the asymptotic performance of ParVI methods while offering the flexibility of a generative sampler. 
Through carefully constructed experiments, we show that GPVI outperforms previous generative ParVI methods such as amortized SVGD, and is competitive with ParVI as well as gold-standard approaches like Hamiltonian Monte Carlo for fitting both exactly known and intractable target distributions.", "bibtex": "@InProceedings{pmlr-v139-ratzlaff21a,\n title = \t {Generative Particle Variational Inference via Estimation of Functional Gradients},\n author = {Ratzlaff, Neale and Bai, Qinxun and Fuxin, Li and Xu, Wei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8869--8879},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ratzlaff21a/ratzlaff21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ratzlaff21a.html},\n abstract = \t {Recently, particle-based variational inference (ParVI) methods have gained interest because they can avoid arbitrary parametric assumptions that are common in variational inference. However, many ParVI approaches do not allow arbitrary sampling from the posterior, and the few that do allow such sampling suffer from suboptimality. This work proposes a new method for learning to approximately sample from the posterior distribution. We construct a neural sampler that is trained with the functional gradient of the KL-divergence between the empirical sampling distribution and the target distribution, assuming the gradient resides within a reproducing kernel Hilbert space. Our generative ParVI (GPVI) approach maintains the asymptotic performance of ParVI methods while offering the flexibility of a generative sampler. 
Through carefully constructed experiments, we show that GPVI outperforms previous generative ParVI methods such as amortized SVGD, and is competitive with ParVI as well as gold-standard approaches like Hamiltonian Monte Carlo for fitting both exactly known and intractable target distributions.}\n}", "pdf": "http://proceedings.mlr.press/v139/ratzlaff21a/ratzlaff21a.pdf", "supp": "", "pdf_size": 3052705, "gs_citation": 1, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9012729491643973248&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Electrical Engineering and Computer Science, Oregon State University; Horizon Robotics, Cupertino, California; Department of Electrical Engineering and Computer Science, Oregon State University; Horizon Robotics, Cupertino, California", "aff_domain": "oregonstate.edu; ; ; ", "email": "oregonstate.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/ratzlaff21a.html", "aff_unique_index": "0;1;0;1", "aff_unique_norm": "Oregon State University;Horizon Robotics", "aff_unique_dep": "Department of Electrical Engineering and Computer Science;", "aff_unique_url": "https://oregonstate.edu;https://www.horizon-robotics.com", "aff_unique_abbr": "OSU;", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Cupertino", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Generative Video Transformer: Can Objects be the Words?", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9969", "id": "9969", "proceeding": "http://proceedings.mlr.press/v139/wu21h.html", "slides": "/media/icml-2021/Slides/9969.pdf", "author_site": "Yi-Fu Wu, Jaesik Yoon, Sungjin Ahn", "author": "Yi-Fu Wu; Jaesik Yoon; Sungjin Ahn", "abstract": "Transformers have been successful for many natural language processing tasks. However, applying transformers to the video domain for tasks such as long-term video generation and scene understanding has remained elusive due to the high computational complexity and the lack of natural tokenization. In this paper, we propose the ObjectCentric Video Transformer (OCVT) which utilizes an object-centric approach for decomposing scenes into tokens suitable for use in a generative video transformer. By factoring the video into objects, our fully unsupervised model is able to learn complex spatio-temporal dynamics of multiple interacting objects in a scene and generate future frames of the video. Our model is also significantly more memory-efficient than pixel-based models and thus able to train on videos of length up to 70 frames with a single 48GB GPU. We compare our model with previous RNN-based approaches as well as other possible video transformer baselines. We demonstrate OCVT performs well when compared to baselines in generating future frames. 
OCVT also develops useful representations for video reasoning, achieving state-of-the-art performance on the CATER task.", "bibtex": "@InProceedings{pmlr-v139-wu21h,\n title = \t {Generative Video Transformer: Can Objects be the Words?},\n author = {Wu, Yi-Fu and Yoon, Jaesik and Ahn, Sungjin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11307--11318},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wu21h/wu21h.pdf},\n url = \t {https://proceedings.mlr.press/v139/wu21h.html},\n abstract = \t {Transformers have been successful for many natural language processing tasks. However, applying transformers to the video domain for tasks such as long-term video generation and scene understanding has remained elusive due to the high computational complexity and the lack of natural tokenization. In this paper, we propose the ObjectCentric Video Transformer (OCVT) which utilizes an object-centric approach for decomposing scenes into tokens suitable for use in a generative video transformer. By factoring the video into objects, our fully unsupervised model is able to learn complex spatio-temporal dynamics of multiple interacting objects in a scene and generate future frames of the video. Our model is also significantly more memory-efficient than pixel-based models and thus able to train on videos of length up to 70 frames with a single 48GB GPU. We compare our model with previous RNN-based approaches as well as other possible video transformer baselines. We demonstrate OCVT performs well when compared to baselines in generating future frames. OCVT also develops useful representations for video reasoning, achieving state-of-the-art performance on the CATER task.}\n}", "pdf": "http://proceedings.mlr.press/v139/wu21h/wu21h.pdf", "supp": "", "pdf_size": 3230213, "gs_citation": 35, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15731642592907152939&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, Rutgers University + SAP Labs; Department of Computer Science, Rutgers University + Rutgers Center for Cognitive Science; Department of Computer Science, Rutgers University + Rutgers Center for Cognitive Science", "aff_domain": "gmail.com; ;gmail.com", "email": "gmail.com; ;gmail.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/wu21h.html", "aff_unique_index": "0+1;0+0;0+0", "aff_unique_norm": "Rutgers University;SAP SE", "aff_unique_dep": "Department of Computer Science;SAP Labs", "aff_unique_url": "https://www.rutgers.edu;https://labs.sap/", "aff_unique_abbr": "Rutgers;SAP Labs", "aff_campus_unique_index": ";;", "aff_campus_unique": "", "aff_country_unique_index": "0+1;0+0;0+0", "aff_country_unique": "United States;Germany" }, { "title": "GeomCA: Geometric Evaluation of Data Representations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9663", "id": "9663", "proceeding": "http://proceedings.mlr.press/v139/poklukar21a.html", "slides": "", "author_site": "Petra Poklukar, Anastasiia Varava, Danica Kragic", "author": "Petra Poklukar; Anastasiia Varava; Danica Kragic", "abstract": "Evaluating the quality of learned representations without relying on a downstream task remains one of the challenges in representation learning. 
In this work, we present the Geometric Component Analysis (GeomCA) algorithm that evaluates representation spaces based on their geometric and topological properties. GeomCA can be applied to representations of any dimension, independently of the model that generated them. We demonstrate its applicability by analyzing representations obtained from a variety of scenarios, such as contrastive learning models, generative models and supervised learning models.", "bibtex": "@InProceedings{pmlr-v139-poklukar21a,\n title = \t {GeomCA: Geometric Evaluation of Data Representations},\n author = {Poklukar, Petra and Varava, Anastasiia and Kragic, Danica},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8588--8598},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/poklukar21a/poklukar21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/poklukar21a.html},\n abstract = \t {Evaluating the quality of learned representations without relying on a downstream task remains one of the challenges in representation learning. In this work, we present the Geometric Component Analysis (GeomCA) algorithm that evaluates representation spaces based on their geometric and topological properties. GeomCA can be applied to representations of any dimension, independently of the model that generated them. We demonstrate its applicability by analyzing representations obtained from a variety of scenarios, such as contrastive learning models, generative models and supervised learning models.}\n}", "pdf": "http://proceedings.mlr.press/v139/poklukar21a/poklukar21a.pdf", "supp": "", "pdf_size": 2820929, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1763637443737261657&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "KTH Royal Institute of Technology, Stockholm, Sweden; KTH Royal Institute of Technology, Stockholm, Sweden; KTH Royal Institute of Technology, Stockholm, Sweden", "aff_domain": "kth.se; ; ", "email": "kth.se; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/poklukar21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "KTH Royal Institute of Technology", "aff_unique_dep": "", "aff_unique_url": "https://www.kth.se", "aff_unique_abbr": "KTH", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Stockholm", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Sweden" }, { "title": "Geometric convergence of elliptical slice sampling", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10663", "id": "10663", "proceeding": "http://proceedings.mlr.press/v139/natarovskii21a.html", "slides": "/media/icml-2021/Slides/10663.pdf", "author_site": "Viacheslav Natarovskii, Daniel Rudolf, Bj\u00f6rn Sprungk", "author": "Viacheslav Natarovskii; Daniel Rudolf; Bj\u00f6rn Sprungk", "abstract": "For Bayesian learning, given likelihood function and Gaussian prior, the elliptical slice sampler, introduced by Murray, Adams and MacKay 2010, provides a tool for the construction of a Markov chain for approximate sampling of the underlying posterior distribution. Besides its wide applicability and simplicity, its main feature is that no tuning is necessary. 
Under weak regularity assumptions on the posterior density we show that the corresponding Markov chain is geometrically ergodic, which yields qualitative convergence guarantees. We illustrate our result for Gaussian posteriors in a fully Gaussian scenario, as for example exhibited in Gaussian process regression, as well as in a setting of a multi-modal distribution. Remarkably, our numerical experiments indicate a dimension-independent performance of elliptical slice sampling even in situations where our ergodicity result does not apply.", "bibtex": "@InProceedings{pmlr-v139-natarovskii21a,\n title = \t {Geometric convergence of elliptical slice sampling},\n author = {Natarovskii, Viacheslav and Rudolf, Daniel and Sprungk, Bj{\\\"o}rn},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7969--7978},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/natarovskii21a/natarovskii21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/natarovskii21a.html},\n abstract = \t {For Bayesian learning, given likelihood function and Gaussian prior, the elliptical slice sampler, introduced by Murray, Adams and MacKay 2010, provides a tool for the construction of a Markov chain for approximate sampling of the underlying posterior distribution. Besides its wide applicability and simplicity, its main feature is that no tuning is necessary. Under weak regularity assumptions on the posterior density we show that the corresponding Markov chain is geometrically ergodic, which yields qualitative convergence guarantees. We illustrate our result for Gaussian posteriors in a fully Gaussian scenario, as for example exhibited in Gaussian process regression, as well as in a setting of a multi-modal distribution. 
Remarkably, our numerical experiments indicate a dimension-independent performance of elliptical slice sampling even in situations where our ergodicity result does not apply.}\n}", "pdf": "http://proceedings.mlr.press/v139/natarovskii21a/natarovskii21a.pdf", "supp": "", "pdf_size": 782163, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7624811845482440047&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Institute for Mathematical Stochastics, Georg-August-Universit\u00e4t G\u00f6ttingen, G\u00f6ttingen, Germany; Institute for Mathematical Stochastics, Georg-August-Universit\u00e4t G\u00f6ttingen, G\u00f6ttingen, Germany; Faculty of Mathematics and Computer Science, Technische Universit\u00e4t Bergakademie Freiberg, Germany", "aff_domain": "uni-goettingen.de;uni-goettingen.de;math.tu-freiberg.de", "email": "uni-goettingen.de;uni-goettingen.de;math.tu-freiberg.de", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/natarovskii21a.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "Georg-August-Universit\u00e4t G\u00f6ttingen;Technische Universit\u00e4t Bergakademie Freiberg", "aff_unique_dep": "Institute for Mathematical Stochastics;Faculty of Mathematics and Computer Science", "aff_unique_url": "https://www.uni-goettingen.de;https://www.tu-freiberg.de", "aff_unique_abbr": "GAU;TUBAF", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "G\u00f6ttingen;Freiberg", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Germany" }, { "title": "Geometry of the Loss Landscape in Overparameterized Neural Networks: Symmetries and Invariances", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10331", "id": "10331", "proceeding": "http://proceedings.mlr.press/v139/simsek21a.html", "slides": "", "author_site": "Berfin Simsek, Fran\u00e7ois Ged, Arthur Jacot, Francesco Spadaro, Clement Hongler, Wulfram Gerstner, Johanni Brea", "author": "Berfin Simsek; Fran\u00e7ois Ged; Arthur Jacot; Francesco Spadaro; Clement Hongler; Wulfram Gerstner; Johanni Brea", "abstract": "We study how permutation symmetries in overparameterized multi-layer neural networks generate \u2018symmetry-induced\u2019 critical points. Assuming a network with $ L $ layers of minimal widths $ r_1^*, \\ldots, r_{L-1}^* $ reaches a zero-loss minimum at $ r_1^*! \\cdots r_{L-1}^*! $ isolated points that are permutations of one another, we show that adding one extra neuron to each layer is sufficient to connect all these previously discrete minima into a single manifold. For a two-layer overparameterized network of width $ r^*+ h =: m $ we explicitly describe the manifold of global minima: it consists of $ T(r^*, m) $ affine subspaces of dimension at least $ h $ that are connected to one another. 
For a network of width $m$, we identify the number $G(r,m)$ of affine subspaces containing only symmetry-induced critical points that are related to the critical points of a smaller network of width $r", "bibtex": "@InProceedings{pmlr-v139-simsek21a,\n title = \t {Geometry of the Loss Landscape in Overparameterized Neural Networks: Symmetries and Invariances},\n author = {Simsek, Berfin and Ged, Fran{\\c{c}}ois and Jacot, Arthur and Spadaro, Francesco and Hongler, Clement and Gerstner, Wulfram and Brea, Johanni},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9722--9732},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/simsek21a/simsek21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/simsek21a.html},\n abstract = \t {We study how permutation symmetries in overparameterized multi-layer neural networks generate \u2018symmetry-induced\u2019 critical points. Assuming a network with $ L $ layers of minimal widths $ r_1^*, \\ldots, r_{L-1}^* $ reaches a zero-loss minimum at $ r_1^*! \\cdots r_{L-1}^*! $ isolated points that are permutations of one another, we show that adding one extra neuron to each layer is sufficient to connect all these previously discrete minima into a single manifold. For a two-layer overparameterized network of width $ r^*+ h =: m $ we explicitly describe the manifold of global minima: it consists of $ T(r^*, m) $ affine subspaces of dimension at least $ h $ that are connected to one another. For a network of width $m$, we identify the number $G(r,m)$ of affine subspaces containing only symmetry-induced critical points that are related to the critical points of a smaller network of width $r", "pdf": "http://proceedings.mlr.press/v139/simsek21a/simsek21a.pdf", "supp": "", "pdf_size": 1526814, "gs_citation": 114, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6069341273217919605&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Chair of Statistical Field Theory, \u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne, Switzerland + Laboratory of Computational Neuroscience, \u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne, Switzerland; Chair of Statistical Field Theory, \u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne, Switzerland; Chair of Statistical Field Theory, \u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne, Switzerland; Chair of Statistical Field Theory, \u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne, Switzerland; Chair of Statistical Field Theory, \u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne, Switzerland + Laboratory of Computational Neuroscience, \u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne, Switzerland; Laboratory of Computational Neuroscience, \u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne, Switzerland + Chair of Statistical Field Theory, \u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne, Switzerland; Laboratory of Computational Neuroscience, \u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne, Switzerland + Chair of Statistical Field Theory, \u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne, Switzerland", "aff_domain": "epfl.ch; ; ; ; ; ;", "email": "epfl.ch; ; ; ; ; ;", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/simsek21a.html", 
"aff_unique_index": "0+0;0;0;0;0+0;0+0;0+0", "aff_unique_norm": "EPFL", "aff_unique_dep": "Chair of Statistical Field Theory", "aff_unique_url": "https://www.epfl.ch", "aff_unique_abbr": "EPFL", "aff_campus_unique_index": ";;;", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0;0;0+0;0+0;0+0", "aff_country_unique": "Switzerland" }, { "title": "Global Convergence of Policy Gradient for Linear-Quadratic Mean-Field Control/Game in Continuous Time", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8961", "id": "8961", "proceeding": "http://proceedings.mlr.press/v139/wang21j.html", "slides": "", "author_site": "Weichen Wang, Jiequn Han, Zhuoran Yang, Zhaoran Wang", "author": "Weichen Wang; Jiequn Han; Zhuoran Yang; Zhaoran Wang", "abstract": "Recent years have witnessed the success of multi-agent reinforcement learning, which has motivated new research directions for mean-field control (MFC) and mean-field game (MFG), as the multi-agent system can be well approximated by a mean-field problem when the number of agents grows to be very large. In this paper, we study the policy gradient (PG) method for the linear-quadratic mean-field control and game, where we assume each agent has identical linear state transitions and quadratic cost functions. While most recent works on policy gradient for MFC and MFG are based on discrete-time models, we focus on a continuous-time model where some of our analyzing techniques could be valuable to the interested readers. For both the MFC and the MFG, we provide PG update and show that it converges to the optimal solution at a linear rate, which is verified by a synthetic simulation. For the MFG, we also provide sufficient conditions for the existence and uniqueness of the Nash equilibrium.", "bibtex": "@InProceedings{pmlr-v139-wang21j,\n title = \t {Global Convergence of Policy Gradient for Linear-Quadratic Mean-Field Control/Game in Continuous Time},\n author = {Wang, Weichen and Han, Jiequn and Yang, Zhuoran and Wang, Zhaoran},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10772--10782},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21j/wang21j.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21j.html},\n abstract = \t {Recent years have witnessed the success of multi-agent reinforcement learning, which has motivated new research directions for mean-field control (MFC) and mean-field game (MFG), as the multi-agent system can be well approximated by a mean-field problem when the number of agents grows to be very large. In this paper, we study the policy gradient (PG) method for the linear-quadratic mean-field control and game, where we assume each agent has identical linear state transitions and quadratic cost functions. While most recent works on policy gradient for MFC and MFG are based on discrete-time models, we focus on a continuous-time model where some of our analyzing techniques could be valuable to the interested readers. For both the MFC and the MFG, we provide PG update and show that it converges to the optimal solution at a linear rate, which is verified by a synthetic simulation. 
For the MFG, we also provide sufficient conditions for the existence and uniqueness of the Nash equilibrium.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21j/wang21j.pdf", "supp": "", "pdf_size": 370572, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3960693081087444944&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Faculty of Business and Economics, The University of Hong Kong; Dept. of Mathematics, Princeton University; Dept. of Operations Research & Financial Engineering, Princeton University; Dept. of Industrial Engineering & Management Sciences, Northwestern University", "aff_domain": "gmail.com;princeton.edu;princeton.edu;northwestern.edu", "email": "gmail.com;princeton.edu;princeton.edu;northwestern.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/wang21j.html", "aff_unique_index": "0;1;1;2", "aff_unique_norm": "University of Hong Kong;Princeton University;Northwestern University", "aff_unique_dep": "Faculty of Business and Economics;Department of Mathematics;Dept. of Industrial Engineering & Management Sciences", "aff_unique_url": "https://www.hku.hk;https://www.princeton.edu;https://www.northwestern.edu", "aff_unique_abbr": "HKU;Princeton;NU", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Hong Kong SAR;Princeton;", "aff_country_unique_index": "0;1;1;1", "aff_country_unique": "China;United States" }, { "title": "Global Optimality Beyond Two Layers: Training Deep ReLU Networks via Convex Programs", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10037", "id": "10037", "proceeding": "http://proceedings.mlr.press/v139/ergen21a.html", "slides": "/media/icml-2021/Slides/10037.pdf", "author_site": "Tolga Ergen, Mert Pilanci", "author": "Tolga Ergen; Mert Pilanci", "abstract": "Understanding the fundamental mechanism behind the success of deep neural networks is one of the key challenges in the modern machine learning literature. Despite numerous attempts, a solid theoretical analysis is yet to be developed. In this paper, we develop a novel unified framework to reveal a hidden regularization mechanism through the lens of convex optimization. We first show that the training of multiple three-layer ReLU sub-networks with weight decay regularization can be equivalently cast as a convex optimization problem in a higher dimensional space, where sparsity is enforced via a group $\\ell_1$-norm regularization. Consequently, ReLU networks can be interpreted as high dimensional feature selection methods. More importantly, we then prove that the equivalent convex problem can be globally optimized by a standard convex optimization solver with a polynomial-time complexity with respect to the number of samples and data dimension when the width of the network is fixed. 
Finally, we numerically validate our theoretical results via experiments involving both synthetic and real datasets.", "bibtex": "@InProceedings{pmlr-v139-ergen21a,\n title = \t {Global Optimality Beyond Two Layers: Training Deep ReLU Networks via Convex Programs},\n author = {Ergen, Tolga and Pilanci, Mert},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2993--3003},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ergen21a/ergen21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ergen21a.html},\n abstract = \t {Understanding the fundamental mechanism behind the success of deep neural networks is one of the key challenges in the modern machine learning literature. Despite numerous attempts, a solid theoretical analysis is yet to be developed. In this paper, we develop a novel unified framework to reveal a hidden regularization mechanism through the lens of convex optimization. We first show that the training of multiple three-layer ReLU sub-networks with weight decay regularization can be equivalently cast as a convex optimization problem in a higher dimensional space, where sparsity is enforced via a group $\\ell_1$-norm regularization. Consequently, ReLU networks can be interpreted as high dimensional feature selection methods. More importantly, we then prove that the equivalent convex problem can be globally optimized by a standard convex optimization solver with a polynomial-time complexity with respect to the number of samples and data dimension when the width of the network is fixed. Finally, we numerically validate our theoretical results via experiments involving both synthetic and real datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/ergen21a/ergen21a.pdf", "supp": "", "pdf_size": 1359171, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5465987581304790041&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Electrical Engineering, Stanford University, CA, USA; Department of Electrical Engineering, Stanford University, CA, USA", "aff_domain": "stanford.edu;stanford.edu", "email": "stanford.edu;stanford.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/ergen21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Electrical Engineering", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Global Prosody Style Transfer Without Text Transcriptions", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9835", "id": "9835", "proceeding": "http://proceedings.mlr.press/v139/qian21b.html", "slides": "", "author_site": "Kaizhi Qian, Yang Zhang, Shiyu Chang, Jinjun Xiong, Chuang Gan, David Cox, Mark Hasegawa-Johnson", "author": "Kaizhi Qian; Yang Zhang; Shiyu Chang; Jinjun Xiong; Chuang Gan; David Cox; Mark Hasegawa-Johnson", "abstract": "Prosody plays an important role in characterizing the style of a speaker or an emotion, but most non-parallel voice or emotion style transfer algorithms do not convert any prosody information. 
Two major components of prosody are pitch and rhythm. Disentangling the prosody information, particularly the rhythm component, from the speech is challenging because it involves breaking the synchrony between the input speech and the disentangled speech representation. As a result, most existing prosody style transfer algorithms would need to rely on some form of text transcriptions to identify the content information, which confines their application to high-resource languages only. Recently, SpeechSplit has made sizeable progress towards unsupervised prosody style transfer, but it is unable to extract high-level global prosody style in an unsupervised manner. In this paper, we propose AutoPST, which can disentangle global prosody style from speech without relying on any text transcriptions. AutoPST is an Autoencoder-based Prosody Style Transfer framework with a thorough rhythm removal module guided by the self-expressive representation learning. Experiments on different style transfer tasks show that AutoPST can effectively convert prosody that correctly reflects the styles of the target domains.", "bibtex": "@InProceedings{pmlr-v139-qian21b,\n title = \t {Global Prosody Style Transfer Without Text Transcriptions},\n author = {Qian, Kaizhi and Zhang, Yang and Chang, Shiyu and Xiong, Jinjun and Gan, Chuang and Cox, David and Hasegawa-Johnson, Mark},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8650--8660},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/qian21b/qian21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/qian21b.html},\n abstract = \t {Prosody plays an important role in characterizing the style of a speaker or an emotion, but most non-parallel voice or emotion style transfer algorithms do not convert any prosody information. Two major components of prosody are pitch and rhythm. Disentangling the prosody information, particularly the rhythm component, from the speech is challenging because it involves breaking the synchrony between the input speech and the disentangled speech representation. As a result, most existing prosody style transfer algorithms would need to rely on some form of text transcriptions to identify the content information, which confines their application to high-resource languages only. Recently, SpeechSplit has made sizeable progress towards unsupervised prosody style transfer, but it is unable to extract high-level global prosody style in an unsupervised manner. In this paper, we propose AutoPST, which can disentangle global prosody style from speech without relying on any text transcriptions. AutoPST is an Autoencoder-based Prosody Style Transfer framework with a thorough rhythm removal module guided by the self-expressive representation learning. Experiments on different style transfer tasks show that AutoPST can effectively convert prosody that correctly reflects the styles of the target domains.}\n}", "pdf": "http://proceedings.mlr.press/v139/qian21b/qian21b.pdf", "supp": "", "pdf_size": 1576656, "gs_citation": 42, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1615418874134295628&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "MIT-IBM Watson AI Lab, USA+IBM Thomas J. Watson Research Center, USA; MIT-IBM Watson AI Lab, USA+IBM Thomas J. 
Watson Research Center, USA; MIT-IBM Watson AI Lab, USA+IBM Thomas J. Watson Research Center, USA; IBM Thomas J. Watson Research Center, USA; MIT-IBM Watson AI Lab, USA+IBM Thomas J. Watson Research Center, USA; MIT-IBM Watson AI Lab, USA+IBM Thomas J. Watson Research Center, USA; University of Illinois at Urbana-Champaign, USA", "aff_domain": "gmail.com;ibm.com; ; ; ; ; ", "email": "gmail.com;ibm.com; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/qian21b.html", "aff_unique_index": "0+0;0+0;0+0;0;0+0;0+0;1", "aff_unique_norm": "IBM;University of Illinois Urbana-Champaign", "aff_unique_dep": "AI Lab;", "aff_unique_url": ";https://illinois.edu", "aff_unique_abbr": "MIT-IBM AI Lab;UIUC", "aff_campus_unique_index": ";;;;;1", "aff_campus_unique": ";Urbana-Champaign", "aff_country_unique_index": "0+0;0+0;0+0;0;0+0;0+0;0", "aff_country_unique": "United States" }, { "title": "Global inducing point variational posteriors for Bayesian neural networks and deep Gaussian processes", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9139", "id": "9139", "proceeding": "http://proceedings.mlr.press/v139/ober21a.html", "slides": "", "author_site": "Sebastian Ober, Laurence Aitchison", "author": "Sebastian W Ober; Laurence Aitchison", "abstract": "We consider the optimal approximate posterior over the top-layer weights in a Bayesian neural network for regression, and show that it exhibits strong dependencies on the lower-layer weights. We adapt this result to develop a correlated approximate posterior over the weights at all layers in a Bayesian neural network. We extend this approach to deep Gaussian processes, unifying inference in the two model classes. Our approximate posterior uses learned \"global\u201d inducing points, which are defined only at the input layer and propagated through the network to obtain inducing inputs at subsequent layers. By contrast, standard, \"local\u201d, inducing point methods from the deep Gaussian process literature optimise a separate set of inducing inputs at every layer, and thus do not model correlations across layers. Our method gives state-of-the-art performance for a variational Bayesian method, without data augmentation or tempering, on CIFAR-10 of 86.7%, which is comparable to SGMCMC without tempering but with data augmentation (88% in Wenzel et al. 2020).", "bibtex": "@InProceedings{pmlr-v139-ober21a,\n title = \t {Global inducing point variational posteriors for Bayesian neural networks and deep Gaussian processes},\n author = {Ober, Sebastian W and Aitchison, Laurence},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8248--8259},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ober21a/ober21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ober21a.html},\n abstract = \t {We consider the optimal approximate posterior over the top-layer weights in a Bayesian neural network for regression, and show that it exhibits strong dependencies on the lower-layer weights. We adapt this result to develop a correlated approximate posterior over the weights at all layers in a Bayesian neural network. We extend this approach to deep Gaussian processes, unifying inference in the two model classes. 
Our approximate posterior uses learned \"global\u201d inducing points, which are defined only at the input layer and propagated through the network to obtain inducing inputs at subsequent layers. By contrast, standard, \"local\u201d, inducing point methods from the deep Gaussian process literature optimise a separate set of inducing inputs at every layer, and thus do not model correlations across layers. Our method gives state-of-the-art performance for a variational Bayesian method, without data augmentation or tempering, on CIFAR-10 of 86.7%, which is comparable to SGMCMC without tempering but with data augmentation (88% in Wenzel et al. 2020).}\n}", "pdf": "http://proceedings.mlr.press/v139/ober21a/ober21a.pdf", "supp": "", "pdf_size": 379627, "gs_citation": 64, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8024621603786330099&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Engineering, University of Cambridge, Cambridge, UK; Department of Computer Science, University of Bristol, Bristol, UK", "aff_domain": "eng.cam.ac.uk;gmail.com", "email": "eng.cam.ac.uk;gmail.com", "github": "github.com/LaurenceA/bayesfunc", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/ober21a.html", "aff_unique_index": "0;1", "aff_unique_norm": "University of Cambridge;University of Bristol", "aff_unique_dep": "Department of Engineering;Department of Computer Science", "aff_unique_url": "https://www.cam.ac.uk;https://www.bristol.ac.uk", "aff_unique_abbr": "Cambridge;UoB", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Cambridge;Bristol", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "title": "Globally-Robust Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9707", "id": "9707", "proceeding": "http://proceedings.mlr.press/v139/leino21a.html", "slides": "/media/icml-2021/Slides/9707.pdf", "author_site": "Klas Leino, Zifan Wang, Matt Fredrikson", "author": "Klas Leino; Zifan Wang; Matt Fredrikson", "abstract": "The threat of adversarial examples has motivated work on training certifiably robust neural networks to facilitate efficient verification of local robustness at inference time. We formalize a notion of global robustness, which captures the operational properties of on-line local robustness certification while yielding a natural learning objective for robust training. We show that widely-used architectures can be easily adapted to this objective by incorporating efficient global Lipschitz bounds into the network, yielding certifiably-robust models by construction that achieve state-of-the-art verifiable accuracy. Notably, this approach requires significantly less time and memory than recent certifiable training methods, and leads to negligible costs when certifying points on-line; for example, our evaluation shows that it is possible to train a large robust Tiny-Imagenet model in a matter of hours. Our models effectively leverage inexpensive global Lipschitz bounds for real-time certification, despite prior suggestions that tighter local bounds are needed for good performance; we posit this is possible because our models are specifically trained to achieve tighter global bounds. 
Namely, we prove that the maximum achievable verifiable accuracy for a given dataset is not improved by using a local bound.", "bibtex": "@InProceedings{pmlr-v139-leino21a,\n title = \t {Globally-Robust Neural Networks},\n author = {Leino, Klas and Wang, Zifan and Fredrikson, Matt},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6212--6222},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/leino21a/leino21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/leino21a.html},\n abstract = \t {The threat of adversarial examples has motivated work on training certifiably robust neural networks to facilitate efficient verification of local robustness at inference time. We formalize a notion of global robustness, which captures the operational properties of on-line local robustness certification while yielding a natural learning objective for robust training. We show that widely-used architectures can be easily adapted to this objective by incorporating efficient global Lipschitz bounds into the network, yielding certifiably-robust models by construction that achieve state-of-the-art verifiable accuracy. Notably, this approach requires significantly less time and memory than recent certifiable training methods, and leads to negligible costs when certifying points on-line; for example, our evaluation shows that it is possible to train a large robust Tiny-Imagenet model in a matter of hours. Our models effectively leverage inexpensive global Lipschitz bounds for real-time certification, despite prior suggestions that tighter local bounds are needed for good performance; we posit this is possible because our models are specifically trained to achieve tighter global bounds. Namely, we prove that the maximum achievable verifiable accuracy for a given dataset is not improved by using a local bound.}\n}", "pdf": "http://proceedings.mlr.press/v139/leino21a/leino21a.pdf", "supp": "", "pdf_size": 540130, "gs_citation": 169, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8564874255784830612&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Carnegie Mellon University; Carnegie Mellon University; Carnegie Mellon University", "aff_domain": "cs.cmu.edu; ;cmu.edu", "email": "cs.cmu.edu; ;cmu.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/leino21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Goal-Conditioned Reinforcement Learning with Imagined Subgoals", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10147", "id": "10147", "proceeding": "http://proceedings.mlr.press/v139/chane-sane21a.html", "slides": "", "author_site": "Elliot Chane-Sane, Cordelia Schmid, Ivan Laptev", "author": "Elliot Chane-Sane; Cordelia Schmid; Ivan Laptev", "abstract": "Goal-conditioned reinforcement learning endows an agent with a large variety of skills, but it often struggles to solve tasks that require more temporally extended reasoning. 
In this work, we propose to incorporate imagined subgoals into policy learning to facilitate learning of complex tasks. Imagined subgoals are predicted by a separate high-level policy, which is trained simultaneously with the policy and its critic. This high-level policy predicts intermediate states halfway to the goal using the value function as a reachability metric. We don\u2019t require the policy to reach these subgoals explicitly. Instead, we use them to define a prior policy, and incorporate this prior into a KL-constrained policy iteration scheme to speed up and regularize learning. Imagined subgoals are used during policy learning, but not during test time, where we only apply the learned policy. We evaluate our approach on complex robotic navigation and manipulation tasks and show that it outperforms existing methods by a large margin.", "bibtex": "@InProceedings{pmlr-v139-chane-sane21a,\n title = \t {Goal-Conditioned Reinforcement Learning with Imagined Subgoals},\n author = {Chane-Sane, Elliot and Schmid, Cordelia and Laptev, Ivan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1430--1440},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chane-sane21a/chane-sane21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/chane-sane21a.html},\n abstract = \t {Goal-conditioned reinforcement learning endows an agent with a large variety of skills, but it often struggles to solve tasks that require more temporally extended reasoning. In this work, we propose to incorporate imagined subgoals into policy learning to facilitate learning of complex tasks. Imagined subgoals are predicted by a separate high-level policy, which is trained simultaneously with the policy and its critic. This high-level policy predicts intermediate states halfway to the goal using the value function as a reachability metric. We don\u2019t require the policy to reach these subgoals explicitly. Instead, we use them to define a prior policy, and incorporate this prior into a KL-constrained policy iteration scheme to speed up and regularize learning. Imagined subgoals are used during policy learning, but not during test time, where we only apply the learned policy. 
We evaluate our approach on complex robotic navigation and manipulation tasks and show that it outperforms existing methods by a large margin.}\n}", "pdf": "http://proceedings.mlr.press/v139/chane-sane21a/chane-sane21a.pdf", "supp": "", "pdf_size": 1359872, "gs_citation": 170, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15028507466475450316&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Inria, \u00c9cole normale sup\u00e9rieure, CNRS, PSL Research University, 75005 Paris, France; Inria, \u00c9cole normale sup\u00e9rieure, CNRS, PSL Research University, 75005 Paris, France; Inria, \u00c9cole normale sup\u00e9rieure, CNRS, PSL Research University, 75005 Paris, France", "aff_domain": "inria.fr; ; ", "email": "inria.fr; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/chane-sane21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "INRIA", "aff_unique_dep": "", "aff_unique_url": "https://www.inria.fr", "aff_unique_abbr": "Inria", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "France" }, { "title": "Grad-TTS: A Diffusion Probabilistic Model for Text-to-Speech", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8569", "id": "8569", "proceeding": "http://proceedings.mlr.press/v139/popov21a.html", "slides": "", "author_site": "Vadim Popov, Ivan Vovk, Vladimir Gogoryan, Tasnima Sadekova, Mikhail Kudinov", "author": "Vadim Popov; Ivan Vovk; Vladimir Gogoryan; Tasnima Sadekova; Mikhail Kudinov", "abstract": "Recently, denoising diffusion probabilistic models and generative score matching have shown high potential in modelling complex data distributions while stochastic calculus has provided a unified point of view on these techniques allowing for flexible inference schemes. 
In this paper we introduce Grad-TTS, a novel text-to-speech model with score-based decoder producing mel-spectrograms by gradually transforming noise predicted by encoder and aligned with text input by means of Monotonic Alignment Search. The framework of stochastic differential equations helps us to generalize conventional diffusion probabilistic models to the case of reconstructing data from noise with different parameters and allows to make this reconstruction flexible by explicitly controlling trade-off between sound quality and inference speed. Subjective human evaluation shows that Grad-TTS is competitive with state-of-the-art text-to-speech approaches in terms of Mean Opinion Score.}\n}", "pdf": "http://proceedings.mlr.press/v139/popov21a/popov21a.pdf", "supp": "", "pdf_size": 2700580, "gs_citation": 635, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6905767521784147251&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Huawei Noah\u2019s Ark Lab, Moscow, Russia; Huawei Noah\u2019s Ark Lab, Moscow, Russia + Higher School of Economics, Moscow, Russia; Huawei Noah\u2019s Ark Lab, Moscow, Russia + Higher School of Economics, Moscow, Russia; Huawei Noah\u2019s Ark Lab, Moscow, Russia; Huawei Noah\u2019s Ark Lab, Moscow, Russia", "aff_domain": "huawei.com;huawei.com; ; ; ", "email": "huawei.com;huawei.com; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/popov21a.html", "aff_unique_index": "0;0+1;0+1;0;0", "aff_unique_norm": "Huawei;Higher School of Economics", "aff_unique_dep": "Huawei Noah\u2019s Ark Lab;", "aff_unique_url": "https://www.huawei.com;https://www.hse.ru", "aff_unique_abbr": "HNAL;HSE", "aff_campus_unique_index": "0;0+0;0+0;0;0", "aff_campus_unique": "Moscow", "aff_country_unique_index": "0;0+0;0+0;0;0", "aff_country_unique": "Russian Federation" }, { "title": "Gradient Disaggregation: Breaking Privacy in Federated Learning by Reconstructing the User Participant Matrix", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8851", "id": "8851", "proceeding": "http://proceedings.mlr.press/v139/lam21b.html", "slides": "/media/icml-2021/Slides/8851.pdf", "author_site": "Maximilian Lam, Gu-Yeon Wei, David Brooks, Vijay Janapa Reddi, Michael Mitzenmacher", "author": "Maximilian Lam; Gu-Yeon Wei; David Brooks; Vijay Janapa Reddi; Michael Mitzenmacher", "abstract": "We show that aggregated model updates in federated learning may be insecure. An untrusted central server may disaggregate user updates from sums of updates across participants given repeated observations, enabling the server to recover privileged information about individual users\u2019 private training data via traditional gradient inference attacks. Our method revolves around reconstructing participant information (e.g: which rounds of training users participated in) from aggregated model updates by leveraging summary information from device analytics commonly used to monitor, debug, and manage federated learning systems. Our attack is parallelizable and we successfully disaggregate user updates on settings with up to thousands of participants. We quantitatively and qualitatively demonstrate significant improvements in the capability of various inference attacks on the disaggregated updates. 
Our attack enables the attribution of learned properties to individual users, violating anonymity, and shows that a determined central server may undermine the secure aggregation protocol to break individual users\u2019 data privacy in federated learning.", "bibtex": "@InProceedings{pmlr-v139-lam21b,\n title = \t {Gradient Disaggregation: Breaking Privacy in Federated Learning by Reconstructing the User Participant Matrix},\n author = {Lam, Maximilian and Wei, Gu-Yeon and Brooks, David and Reddi, Vijay Janapa and Mitzenmacher, Michael},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5959--5968},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lam21b/lam21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/lam21b.html},\n abstract = \t {We show that aggregated model updates in federated learning may be insecure. An untrusted central server may disaggregate user updates from sums of updates across participants given repeated observations, enabling the server to recover privileged information about individual users\u2019 private training data via traditional gradient inference attacks. Our method revolves around reconstructing participant information (e.g: which rounds of training users participated in) from aggregated model updates by leveraging summary information from device analytics commonly used to monitor, debug, and manage federated learning systems. Our attack is parallelizable and we successfully disaggregate user updates on settings with up to thousands of participants. We quantitatively and qualitatively demonstrate significant improvements in the capability of various inference attacks on the disaggregated updates. 
Our attack enables the attribution of learned properties to individual users, violating anonymity, and shows that a determined central server may undermine the secure aggregation protocol to break individual users\u2019 data privacy in federated learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/lam21b/lam21b.pdf", "supp": "", "pdf_size": 5185281, "gs_citation": 84, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1910992678848824138&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Harvard University; Harvard University; Harvard University; Harvard University; Harvard University", "aff_domain": "g.harvard.edu; ; ; ; ", "email": "g.harvard.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/lam21b.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Harvard University", "aff_unique_dep": "", "aff_unique_url": "https://www.harvard.edu", "aff_unique_abbr": "Harvard", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Graph Contrastive Learning Automated", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9823", "id": "9823", "proceeding": "http://proceedings.mlr.press/v139/you21a.html", "slides": "/media/icml-2021/Slides/9823.pdf", "author_site": "Yuning You, Tianlong Chen, Yang Shen, Zhangyang \u201cAtlas\u201d Wang", "author": "Yuning You; Tianlong Chen; Yang Shen; Zhangyang Wang", "abstract": "Self-supervised learning on graph-structured data has drawn recent interest for learning generalizable, transferable and robust representations from unlabeled graphs. Among many, graph contrastive learning (GraphCL) has emerged with promising representation learning performance. Unfortunately, unlike its counterpart on image data, the effectiveness of GraphCL hinges on ad-hoc data augmentations, which have to be manually picked per dataset, by either rules of thumb or trial-and-errors, owing to the diverse nature of graph data. That significantly limits the more general applicability of GraphCL. Aiming to fill in this crucial gap, this paper proposes a unified bi-level optimization framework to automatically, adaptively and dynamically select data augmentations when performing GraphCL on specific graph data. The general framework, dubbed JOint Augmentation Optimization (JOAO), is instantiated as min-max optimization. The selections of augmentations made by JOAO are shown to be in general aligned with previous \"best practices\" observed from handcrafted tuning: yet now being automated, more flexible and versatile. Moreover, we propose a new augmentation-aware projection head mechanism, which will route output features through different projection heads corresponding to different augmentations chosen at each training step. Extensive experiments demonstrate that JOAO performs on par with or sometimes better than the state-of-the-art competitors including GraphCL, on multiple graph datasets of various scales and types, yet without resorting to any laborious dataset-specific tuning on augmentation selection. 
We release the code at https://github.com/Shen-Lab/GraphCL_Automated.", "bibtex": "@InProceedings{pmlr-v139-you21a,\n title = \t {Graph Contrastive Learning Automated},\n author = {You, Yuning and Chen, Tianlong and Shen, Yang and Wang, Zhangyang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12121--12132},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/you21a/you21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/you21a.html},\n abstract = \t {Self-supervised learning on graph-structured data has drawn recent interest for learning generalizable, transferable and robust representations from unlabeled graphs. Among many, graph contrastive learning (GraphCL) has emerged with promising representation learning performance. Unfortunately, unlike its counterpart on image data, the effectiveness of GraphCL hinges on ad-hoc data augmentations, which have to be manually picked per dataset, by either rules of thumb or trial-and-errors, owing to the diverse nature of graph data. That significantly limits the more general applicability of GraphCL. Aiming to fill in this crucial gap, this paper proposes a unified bi-level optimization framework to automatically, adaptively and dynamically select data augmentations when performing GraphCL on specific graph data. The general framework, dubbed JOint Augmentation Optimization (JOAO), is instantiated as min-max optimization. The selections of augmentations made by JOAO are shown to be in general aligned with previous \"best practices\" observed from handcrafted tuning: yet now being automated, more flexible and versatile. Moreover, we propose a new augmentation-aware projection head mechanism, which will route output features through different projection heads corresponding to different augmentations chosen at each training step. Extensive experiments demonstrate that JOAO performs on par with or sometimes better than the state-of-the-art competitors including GraphCL, on multiple graph datasets of various scales and types, yet without resorting to any laborious dataset-specific tuning on augmentation selection. 
We release the code at https://github.com/Shen-Lab/GraphCL_Automated.}\n}", "pdf": "http://proceedings.mlr.press/v139/you21a/you21a.pdf", "supp": "", "pdf_size": 947368, "gs_citation": 612, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4319391299971749370&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Texas A&M University; The University of Texas at Austin; Texas A&M University; The University of Texas at Austin", "aff_domain": "tamu.edu;utexas.edu;tamu.edu;utexas.edu", "email": "tamu.edu;utexas.edu;tamu.edu;utexas.edu", "github": "https://github.com/Shen-Lab/GraphCL_Automated", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/you21a.html", "aff_unique_index": "0;1;0;1", "aff_unique_norm": "Texas A&M University;University of Texas at Austin", "aff_unique_dep": ";", "aff_unique_url": "https://www.tamu.edu;https://www.utexas.edu", "aff_unique_abbr": "TAMU;UT Austin", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Austin", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Graph Convolution for Semi-Supervised Classification: Improved Linear Separability and Out-of-Distribution Generalization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8741", "id": "8741", "proceeding": "http://proceedings.mlr.press/v139/baranwal21a.html", "slides": "/media/icml-2021/Slides/8741.pdf", "author_site": "Aseem Baranwal, Kimon Fountoulakis, Aukosh Jagannath", "author": "Aseem Baranwal; Kimon Fountoulakis; Aukosh Jagannath", "abstract": "Recently there has been increased interest in semi-supervised classification in the presence of graphical information. A new class of learning models has emerged that relies, at its most basic level, on classifying the data after first applying a graph convolution. To understand the merits of this approach, we study the classification of a mixture of Gaussians, where the data corresponds to the node attributes of a stochastic block model. We show that graph convolution extends the regime in which the data is linearly separable by a factor of roughly $1/\\sqrt{D}$, where $D$ is the expected degree of a node, as compared to the mixture model data on its own. Furthermore, we find that the linear classifier obtained by minimizing the cross-entropy loss after the graph convolution generalizes to out-of-distribution data where the unseen data can have different intra- and inter-class edge probabilities from the training data.", "bibtex": "@InProceedings{pmlr-v139-baranwal21a,\n title = \t {Graph Convolution for Semi-Supervised Classification: Improved Linear Separability and Out-of-Distribution Generalization},\n author = {Baranwal, Aseem and Fountoulakis, Kimon and Jagannath, Aukosh},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {684--693},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/baranwal21a/baranwal21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/baranwal21a.html},\n abstract = \t {Recently there has been increased interest in semi-supervised classification in the presence of graphical information. A new class of learning models has emerged that relies, at its most basic level, on classifying the data after first applying a graph convolution. 
To understand the merits of this approach, we study the classification of a mixture of Gaussians, where the data corresponds to the node attributes of a stochastic block model. We show that graph convolution extends the regime in which the data is linearly separable by a factor of roughly $1/\\sqrt{D}$, where $D$ is the expected degree of a node, as compared to the mixture model data on its own. Furthermore, we find that the linear classifier obtained by minimizing the cross-entropy loss after the graph convolution generalizes to out-of-distribution data where the unseen data can have different intra- and inter-class edge probabilities from the training data.}\n}", "pdf": "http://proceedings.mlr.press/v139/baranwal21a/baranwal21a.pdf", "supp": "", "pdf_size": 8193510, "gs_citation": 93, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5101690322548064478&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "David R. Cheriton School of Computer Science, University of Waterloo, Waterloo, Canada; David R. Cheriton School of Computer Science, University of Waterloo, Waterloo, Canada; Department of Statistics and Actuarial Science, Department of Applied Mathematics, University of Waterloo, Waterloo, Canada", "aff_domain": "uwaterloo.ca; ; ", "email": "uwaterloo.ca; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/baranwal21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Waterloo", "aff_unique_dep": "David R. Cheriton School of Computer Science", "aff_unique_url": "https://uwaterloo.ca", "aff_unique_abbr": "UWaterloo", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Waterloo", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Canada" }, { "title": "Graph Cuts Always Find a Global Optimum for Potts Models (With a Catch)", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10087", "id": "10087", "proceeding": "http://proceedings.mlr.press/v139/lang21a.html", "slides": "", "author_site": "Hunter Lang, David Sontag, Aravindan Vijayaraghavan", "author": "Hunter Lang; David Sontag; Aravindan Vijayaraghavan", "abstract": "We prove that the alpha-expansion algorithm for MAP inference always returns a globally optimal assignment for Markov Random Fields with Potts pairwise potentials, with a catch: the returned assignment is only guaranteed to be optimal for an instance within a small perturbation of the original problem instance. In other words, all local minima with respect to expansion moves are global minima to slightly perturbed versions of the problem. On \"real-world\" instances, MAP assignments of small perturbations of the problem should be very similar to the MAP assignment(s) of the original problem instance. We design an algorithm that can certify whether this is the case in practice. On several MAP inference problem instances from computer vision, this algorithm certifies that MAP solutions to all of these perturbations are very close to solutions of the original instance. These results taken together give a cohesive explanation for the good performance of \"graph cuts\" algorithms in practice. 
Every local expansion minimum is a global minimum in a small perturbation of the problem, and all of these global minima are close to the original solution.", "bibtex": "@InProceedings{pmlr-v139-lang21a,\n title = \t {Graph Cuts Always Find a Global Optimum for Potts Models (With a Catch)},\n author = {Lang, Hunter and Sontag, David and Vijayaraghavan, Aravindan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5990--5999},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lang21a/lang21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lang21a.html},\n abstract = \t {We prove that the alpha-expansion algorithm for MAP inference always returns a globally optimal assignment for Markov Random Fields with Potts pairwise potentials, with a catch: the returned assignment is only guaranteed to be optimal for an instance within a small perturbation of the original problem instance. In other words, all local minima with respect to expansion moves are global minima to slightly perturbed versions of the problem. On \"real-world\" instances, MAP assignments of small perturbations of the problem should be very similar to the MAP assignment(s) of the original problem instance. We design an algorithm that can certify whether this is the case in practice. On several MAP inference problem instances from computer vision, this algorithm certifies that MAP solutions to all of these perturbations are very close to solutions of the original instance. These results taken together give a cohesive explanation for the good performance of \"graph cuts\" algorithms in practice. Every local expansion minimum is a global minimum in a small perturbation of the problem, and all of these global minima are close to the original solution.}\n}", "pdf": "http://proceedings.mlr.press/v139/lang21a/lang21a.pdf", "supp": "", "pdf_size": 1809296, "gs_citation": 2, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17687333451139468483&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "MIT CSAIL, Cambridge MA, USA; MIT CSAIL, Cambridge MA, USA; Northwestern University, Evanston IL, USA", "aff_domain": "mit.edu; ; ", "email": "mit.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/lang21a.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "Massachusetts Institute of Technology;Northwestern University", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory;", "aff_unique_url": "https://www.csail.mit.edu;https://www.northwestern.edu", "aff_unique_abbr": "MIT CSAIL;NU", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Cambridge;Evanston", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Graph Mixture Density Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9631", "id": "9631", "proceeding": "http://proceedings.mlr.press/v139/errica21a.html", "slides": "/media/icml-2021/Slides/9631.pdf", "author_site": "Federico Errica, Davide Bacciu, Alessio Micheli", "author": "Federico Errica; Davide Bacciu; Alessio Micheli", "abstract": "We introduce the Graph Mixture Density Networks, a new family of machine learning models that can fit multimodal output distributions conditioned on graphs of arbitrary topology. 
By combining ideas from mixture models and graph representation learning, we address a broader class of challenging conditional density estimation problems that rely on structured data. In this respect, we evaluate our method on a new benchmark application that leverages random graphs for stochastic epidemic simulations. We show a significant improvement in the likelihood of epidemic outcomes when taking into account both multimodality and structure. The empirical analysis is complemented by two real-world regression tasks showing the effectiveness of our approach in modeling the output prediction uncertainty. Graph Mixture Density Networks open appealing research opportunities in the study of structure-dependent phenomena that exhibit non-trivial conditional output distributions.", "bibtex": "@InProceedings{pmlr-v139-errica21a,\n title = \t {Graph Mixture Density Networks},\n author = {Errica, Federico and Bacciu, Davide and Micheli, Alessio},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3025--3035},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/errica21a/errica21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/errica21a.html},\n abstract = \t {We introduce the Graph Mixture Density Networks, a new family of machine learning models that can fit multimodal output distributions conditioned on graphs of arbitrary topology. By combining ideas from mixture models and graph representation learning, we address a broader class of challenging conditional density estimation problems that rely on structured data. In this respect, we evaluate our method on a new benchmark application that leverages random graphs for stochastic epidemic simulations. We show a significant improvement in the likelihood of epidemic outcomes when taking into account both multimodality and structure. The empirical analysis is complemented by two real-world regression tasks showing the effectiveness of our approach in modeling the output prediction uncertainty. 
Graph Mixture Density Networks open appealing research opportunities in the study of structure-dependent phenomena that exhibit non-trivial conditional output distributions.}\n}", "pdf": "http://proceedings.mlr.press/v139/errica21a/errica21a.pdf", "supp": "", "pdf_size": 3636024, "gs_citation": 36, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13606441826263868149&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science, University of Pisa; Department of Computer Science, University of Pisa; Department of Computer Science, University of Pisa", "aff_domain": "phd.unipi.it;di.unipi.it;di.unipi.it", "email": "phd.unipi.it;di.unipi.it;di.unipi.it", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/errica21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Pisa", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.unipi.it", "aff_unique_abbr": "UNIPi", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Italy" }, { "title": "Graph Neural Networks Inspired by Classical Iterative Algorithms", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8797", "id": "8797", "proceeding": "http://proceedings.mlr.press/v139/yang21g.html", "slides": "", "author_site": "Yang Yongyi, Tang Liu, Yangkun Wang, Jinjing Zhou, Quan Gan, Zhewei Wei, Zheng Zhang, Zengfeng Huang, David Wipf", "author": "Yongyi Yang; Tang Liu; Yangkun Wang; Jinjing Zhou; Quan Gan; Zhewei Wei; Zheng Zhang; Zengfeng Huang; David Wipf", "abstract": "Despite the recent success of graph neural networks (GNN), common architectures often exhibit significant limitations, including sensitivity to oversmoothing, long-range dependencies, and spurious edges, e.g., as can occur as a result of graph heterophily or adversarial attacks. To at least partially address these issues within a simple transparent framework, we consider a new family of GNN layers designed to mimic and integrate the update rules of two classical iterative algorithms, namely, proximal gradient descent and iterative reweighted least squares (IRLS). The former defines an extensible base GNN architecture that is immune to oversmoothing while nonetheless capturing long-range dependencies by allowing arbitrary propagation steps. In contrast, the latter produces a novel attention mechanism that is explicitly anchored to an underlying end-to-end energy function, contributing stability with respect to edge uncertainty. When combined we obtain an extremely simple yet robust model that we evaluate across disparate scenarios including standardized benchmarks, adversarially-perturbated graphs, graphs with heterophily, and graphs involving long-range dependencies. In doing so, we compare against SOTA GNN approaches that have been explicitly designed for the respective task, achieving competitive or superior node classification accuracy. Our code is available at https://github.com/FFTYYY/TWIRLS. 
And for an extended version of this work, please see https://arxiv.org/abs/2103.06064.", "bibtex": "@InProceedings{pmlr-v139-yang21g,\n title = \t {Graph Neural Networks Inspired by Classical Iterative Algorithms},\n author = {Yang, Yongyi and Liu, Tang and Wang, Yangkun and Zhou, Jinjing and Gan, Quan and Wei, Zhewei and Zhang, Zheng and Huang, Zengfeng and Wipf, David},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11773--11783},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yang21g/yang21g.pdf},\n url = \t {https://proceedings.mlr.press/v139/yang21g.html},\n abstract = \t {Despite the recent success of graph neural networks (GNN), common architectures often exhibit significant limitations, including sensitivity to oversmoothing, long-range dependencies, and spurious edges, e.g., as can occur as a result of graph heterophily or adversarial attacks. To at least partially address these issues within a simple transparent framework, we consider a new family of GNN layers designed to mimic and integrate the update rules of two classical iterative algorithms, namely, proximal gradient descent and iterative reweighted least squares (IRLS). The former defines an extensible base GNN architecture that is immune to oversmoothing while nonetheless capturing long-range dependencies by allowing arbitrary propagation steps. In contrast, the latter produces a novel attention mechanism that is explicitly anchored to an underlying end-to-end energy function, contributing stability with respect to edge uncertainty. When combined we obtain an extremely simple yet robust model that we evaluate across disparate scenarios including standardized benchmarks, adversarially-perturbated graphs, graphs with heterophily, and graphs involving long-range dependencies. In doing so, we compare against SOTA GNN approaches that have been explicitly designed for the respective task, achieving competitive or superior node classification accuracy. Our code is available at https://github.com/FFTYYY/TWIRLS. 
And for an extended version of this work, please see https://arxiv.org/abs/2103.06064.}\n}", "pdf": "http://proceedings.mlr.press/v139/yang21g/yang21g.pdf", "supp": "", "pdf_size": 455784, "gs_citation": 104, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7834297008396631458&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Fudan University\u2020; Fudan University\u2020; Shanghai Jiao Tong University\u2020; Amazon\u2020; Amazon\u2020; Renmin University of China\u2020; Amazon\u2020; Fudan University\u2020; Amazon\u2020", "aff_domain": "fudan.edu.cn;gmail.com; ; ; ; ; ; ;", "email": "fudan.edu.cn;gmail.com; ; ; ; ; ; ;", "github": "", "project": "", "author_num": 9, "oa": "https://proceedings.mlr.press/v139/yang21g.html", "aff_unique_index": "0;0;1;2;2;3;2;0;2", "aff_unique_norm": "Fudan University;Shanghai Jiao Tong University;Amazon;Renmin University of China", "aff_unique_dep": ";;Amazon;", "aff_unique_url": "https://www.fudan.edu.cn;https://www.sjtu.edu.cn;https://www.amazon.com;http://www.ruc.edu.cn", "aff_unique_abbr": "Fudan;SJTU;Amazon;RUC", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;1;1;0;1;0;1", "aff_country_unique": "China;United States" }, { "title": "GraphDF: A Discrete Flow Model for Molecular Graph Generation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10443", "id": "10443", "proceeding": "http://proceedings.mlr.press/v139/luo21a.html", "slides": "", "author_site": "Youzhi Luo, Keqiang Yan, Shuiwang Ji", "author": "Youzhi Luo; Keqiang Yan; Shuiwang Ji", "abstract": "We consider the problem of molecular graph generation using deep models. While graphs are discrete, most existing methods use continuous latent variables, resulting in inaccurate modeling of discrete graph structures. In this work, we propose GraphDF, a novel discrete latent variable model for molecular graph generation based on normalizing flow methods. GraphDF uses invertible modulo shift transforms to map discrete latent variables to graph nodes and edges. We show that the use of discrete latent variables reduces computational costs and eliminates the negative effect of dequantization. Comprehensive experimental results show that GraphDF outperforms prior methods on random generation, property optimization, and constrained optimization tasks.", "bibtex": "@InProceedings{pmlr-v139-luo21a,\n title = \t {GraphDF: A Discrete Flow Model for Molecular Graph Generation},\n author = {Luo, Youzhi and Yan, Keqiang and Ji, Shuiwang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7192--7203},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/luo21a/luo21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/luo21a.html},\n abstract = \t {We consider the problem of molecular graph generation using deep models. While graphs are discrete, most existing methods use continuous latent variables, resulting in inaccurate modeling of discrete graph structures. In this work, we propose GraphDF, a novel discrete latent variable model for molecular graph generation based on normalizing flow methods. GraphDF uses invertible modulo shift transforms to map discrete latent variables to graph nodes and edges. 
We show that the use of discrete latent variables reduces computational costs and eliminates the negative effect of dequantization. Comprehensive experimental results show that GraphDF outperforms prior methods on random generation, property optimization, and constrained optimization tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/luo21a/luo21a.pdf", "supp": "", "pdf_size": 871089, "gs_citation": 229, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4581935661165018199&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science & Engineering, Texas A&M University, TX, USA; Department of Computer Science & Engineering, Texas A&M University, TX, USA; Department of Computer Science & Engineering, Texas A&M University, TX, USA", "aff_domain": "tamu.edu; ; ", "email": "tamu.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/luo21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Texas A&M University", "aff_unique_dep": "Department of Computer Science & Engineering", "aff_unique_url": "https://www.tamu.edu", "aff_unique_abbr": "TAMU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "TX", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "GraphNorm: A Principled Approach to Accelerating Graph Neural Network Training", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10655", "id": "10655", "proceeding": "http://proceedings.mlr.press/v139/cai21e.html", "slides": "/media/icml-2021/Slides/10655_KLtUTML.pdf", "author_site": "Tianle Cai, Shengjie Luo, Keyulu Xu, Di He, Tie-Yan Liu, Liwei Wang", "author": "Tianle Cai; Shengjie Luo; Keyulu Xu; Di He; Tie-Yan Liu; Liwei Wang", "abstract": "Normalization is known to help the optimization of deep neural networks. Curiously, different architectures require specialized normalization methods. In this paper, we study what normalization is effective for Graph Neural Networks (GNNs). First, we adapt and evaluate the existing methods from other domains to GNNs. Faster convergence is achieved with InstanceNorm compared to BatchNorm and LayerNorm. We provide an explanation by showing that InstanceNorm serves as a preconditioner for GNNs, but such preconditioning effect is weaker with BatchNorm due to the heavy batch noise in graph datasets. Second, we show that the shift operation in InstanceNorm results in an expressiveness degradation of GNNs for highly regular graphs. We address this issue by proposing GraphNorm with a learnable shift. Empirically, GNNs with GraphNorm converge faster compared to GNNs using other normalization. 
GraphNorm also improves the generalization of GNNs, achieving better performance on graph classification benchmarks.", "bibtex": "@InProceedings{pmlr-v139-cai21e,\n title = \t {GraphNorm: A Principled Approach to Accelerating Graph Neural Network Training},\n author = {Cai, Tianle and Luo, Shengjie and Xu, Keyulu and He, Di and Liu, Tie-Yan and Wang, Liwei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1204--1215},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cai21e/cai21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/cai21e.html},\n abstract = \t {Normalization is known to help the optimization of deep neural networks. Curiously, different architectures require specialized normalization methods. In this paper, we study what normalization is effective for Graph Neural Networks (GNNs). First, we adapt and evaluate the existing methods from other domains to GNNs. Faster convergence is achieved with InstanceNorm compared to BatchNorm and LayerNorm. We provide an explanation by showing that InstanceNorm serves as a preconditioner for GNNs, but such preconditioning effect is weaker with BatchNorm due to the heavy batch noise in graph datasets. Second, we show that the shift operation in InstanceNorm results in an expressiveness degradation of GNNs for highly regular graphs. We address this issue by proposing GraphNorm with a learnable shift. Empirically, GNNs with GraphNorm converge faster compared to GNNs using other normalization. GraphNorm also improves the generalization of GNNs, achieving better performance on graph classification benchmarks.}\n}", "pdf": "http://proceedings.mlr.press/v139/cai21e/cai21e.pdf", "supp": "", "pdf_size": 4180782, "gs_citation": 230, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1826997539590987231&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": ";;;;;", "aff_domain": ";;;;;", "email": ";;;;;", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/cai21e.html" }, { "title": "Grey-box Extraction of Natural Language Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8783", "id": "8783", "proceeding": "http://proceedings.mlr.press/v139/zanella-beguelin21a.html", "slides": "", "author_site": "Santiago Zanella-Beguelin, Shruti Tople, Andrew Paverd, Boris K\u00f6pf", "author": "Santiago Zanella-Beguelin; Shruti Tople; Andrew Paverd; Boris K\u00f6pf", "abstract": "Model extraction attacks attempt to replicate a target machine learning model by querying its inference API. State-of-the-art attacks are learning-based and construct replicas by supervised training on the target model\u2019s predictions, but an emerging class of attacks exploit algebraic properties to obtain high-fidelity replicas using orders of magnitude fewer queries. So far, these algebraic attacks have been limited to neural networks with few hidden layers and ReLU activations. In this paper we present algebraic and hybrid algebraic/learning-based attacks on large-scale natural language models. We consider a grey-box setting, targeting models with a pre-trained (public) encoder followed by a single (private) classification layer. 
Our key findings are that (i) with a frozen encoder, high-fidelity extraction is possible with a small number of in-distribution queries, making extraction attacks indistinguishable from legitimate use; (ii) when the encoder is fine-tuned, a hybrid learning-based/algebraic attack improves over the learning-based state-of-the-art without requiring additional queries.", "bibtex": "@InProceedings{pmlr-v139-zanella-beguelin21a,\n title = \t {Grey-box Extraction of Natural Language Models},\n author = {Zanella-Beguelin, Santiago and Tople, Shruti and Paverd, Andrew and K{\\\"o}pf, Boris},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12278--12286},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zanella-beguelin21a/zanella-beguelin21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zanella-beguelin21a.html},\n abstract = \t {Model extraction attacks attempt to replicate a target machine learning model by querying its inference API. State-of-the-art attacks are learning-based and construct replicas by supervised training on the target model\u2019s predictions, but an emerging class of attacks exploit algebraic properties to obtain high-fidelity replicas using orders of magnitude fewer queries. So far, these algebraic attacks have been limited to neural networks with few hidden layers and ReLU activations. In this paper we present algebraic and hybrid algebraic/learning-based attacks on large-scale natural language models. We consider a grey-box setting, targeting models with a pre-trained (public) encoder followed by a single (private) classification layer. 
Our key findings are that (i) with a frozen encoder, high-fidelity extraction is possible with a small number of in-distribution queries, making extraction attacks indistinguishable from legitimate use; (ii) when the encoder is fine-tuned, a hybrid learning-based/algebraic attack improves over the learning-based state-of-the-art without requiring additional queries.}\n}", "pdf": "http://proceedings.mlr.press/v139/zanella-beguelin21a/zanella-beguelin21a.pdf", "supp": "", "pdf_size": 477411, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9632489659096192723&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Microsoft Research; Microsoft Research; Microsoft Research + Microsoft Security Response Center; Microsoft Research", "aff_domain": "microsoft.com; ;microsoft.com;microsoft.com", "email": "microsoft.com; ;microsoft.com;microsoft.com", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/zanella-beguelin21a.html", "aff_unique_index": "0;0;0+0;0", "aff_unique_norm": "Microsoft", "aff_unique_dep": "Microsoft Research", "aff_unique_url": "https://www.microsoft.com/en-us/research", "aff_unique_abbr": "MSR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0+0;0", "aff_country_unique": "United States" }, { "title": "Grid-Functioned Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9297", "id": "9297", "proceeding": "http://proceedings.mlr.press/v139/dehesa21a.html", "slides": "/media/icml-2021/Slides/9297.pdf", "author_site": "Javier Dehesa, Andrew Vidler, Julian Padget, Christof Lutteroth", "author": "Javier Dehesa; Andrew Vidler; Julian Padget; Christof Lutteroth", "abstract": "We introduce a new neural network architecture that we call \"grid-functioned\" neural networks. It utilises a grid structure of network parameterisations that can be specialised for different subdomains of the problem, while maintaining smooth, continuous behaviour. The grid gives the user flexibility to prevent gross features from overshadowing important minor ones. We present a full characterisation of its computational and spatial complexity, and demonstrate its potential, compared to a traditional architecture, over a set of synthetic regression problems. We further illustrate the benefits through a real-world 3D skeletal animation case study, where it offers the same visual quality as a state-of-the-art model, but with lower computational complexity and better control accuracy.", "bibtex": "@InProceedings{pmlr-v139-dehesa21a,\n title = \t {Grid-Functioned Neural Networks},\n author = {Dehesa, Javier and Vidler, Andrew and Padget, Julian and Lutteroth, Christof},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2559--2567},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/dehesa21a/dehesa21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/dehesa21a.html},\n abstract = \t {We introduce a new neural network architecture that we call \"grid-functioned\" neural networks. It utilises a grid structure of network parameterisations that can be specialised for different subdomains of the problem, while maintaining smooth, continuous behaviour. 
The grid gives the user flexibility to prevent gross features from overshadowing important minor ones. We present a full characterisation of its computational and spatial complexity, and demonstrate its potential, compared to a traditional architecture, over a set of synthetic regression problems. We further illustrate the benefits through a real-world 3D skeletal animation case study, where it offers the same visual quality as a state-of-the-art model, but with lower computational complexity and better control accuracy.}\n}", "pdf": "http://proceedings.mlr.press/v139/dehesa21a/dehesa21a.pdf", "supp": "", "pdf_size": 2812824, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8669540869747284534&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science, University of Bath, Bath, UK+Ninja Theory, Cambridge, UK; Ninja Theory, Cambridge, UK; Department of Computer Science, University of Bath, Bath, UK; Department of Computer Science, University of Bath, Bath, UK", "aff_domain": "bath.edu; ; ; ", "email": "bath.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/dehesa21a.html", "aff_unique_index": "0+1;1;0;0", "aff_unique_norm": "University of Bath;Ninja Theory", "aff_unique_dep": "Department of Computer Science;", "aff_unique_url": "https://www.bath.ac.uk;https://www.ninjatheory.com", "aff_unique_abbr": "Bath;", "aff_campus_unique_index": "0+1;1;0;0", "aff_campus_unique": "Bath;Cambridge", "aff_country_unique_index": "0+0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Grounding Language to Entities and Dynamics for Generalization in Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9965", "id": "9965", "proceeding": "http://proceedings.mlr.press/v139/hanjie21a.html", "slides": "/media/icml-2021/Slides/9965.pdf", "author_site": "Austin W. Hanjie, Victor Zhong, Karthik Narasimhan", "author": "Austin W. Hanjie; Victor Y Zhong; Karthik Narasimhan", "abstract": "We investigate the use of natural language to drive the generalization of control policies and introduce the new multi-task environment Messenger with free-form text manuals describing the environment dynamics. Unlike previous work, Messenger does not assume prior knowledge connecting text and state observations {\u2014} the control policy must simultaneously ground the game manual to entity symbols and dynamics in the environment. We develop a new model, EMMA (Entity Mapper with Multi-modal Attention) which uses an entity-conditioned attention module that allows for selective focus over relevant descriptions in the manual for each entity in the environment. EMMA is end-to-end differentiable and learns a latent grounding of entities and dynamics from text to observations using only environment rewards. EMMA achieves successful zero-shot generalization to unseen games with new dynamics, obtaining a 40% higher win rate compared to multiple baselines. However, win rate on the hardest stage of Messenger remains low (10%), demonstrating the need for additional work in this direction.", "bibtex": "@InProceedings{pmlr-v139-hanjie21a,\n title = \t {Grounding Language to Entities and Dynamics for Generalization in Reinforcement Learning},\n author = {Hanjie, Austin W. 
and Zhong, Victor Y and Narasimhan, Karthik},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4051--4062},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hanjie21a/hanjie21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hanjie21a.html},\n abstract = \t {We investigate the use of natural language to drive the generalization of control policies and introduce the new multi-task environment Messenger with free-form text manuals describing the environment dynamics. Unlike previous work, Messenger does not assume prior knowledge connecting text and state observations {\u2014} the control policy must simultaneously ground the game manual to entity symbols and dynamics in the environment. We develop a new model, EMMA (Entity Mapper with Multi-modal Attention) which uses an entity-conditioned attention module that allows for selective focus over relevant descriptions in the manual for each entity in the environment. EMMA is end-to-end differentiable and learns a latent grounding of entities and dynamics from text to observations using only environment rewards. EMMA achieves successful zero-shot generalization to unseen games with new dynamics, obtaining a 40% higher win rate compared to multiple baselines. However, win rate on the hardest stage of Messenger remains low (10%), demonstrating the need for additional work in this direction.}\n}", "pdf": "http://proceedings.mlr.press/v139/hanjie21a/hanjie21a.pdf", "supp": "", "pdf_size": 2296424, "gs_citation": 60, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14975248165561232256&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Computer Science, Princeton University; Computer Science, University of Washington; Computer Science, Princeton University", "aff_domain": "cs.princeton.edu; ; ", "email": "cs.princeton.edu; ; ", "github": "https://github.com/ahjwang/messenger-emma", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/hanjie21a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Princeton University;University of Washington", "aff_unique_dep": "Computer Science;Computer Science", "aff_unique_url": "https://www.princeton.edu;https://www.washington.edu", "aff_unique_abbr": "Princeton;UW", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Princeton;Seattle", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Group Fisher Pruning for Practical Network Compression", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9875", "id": "9875", "proceeding": "http://proceedings.mlr.press/v139/liu21ab.html", "slides": "", "author_site": "Liyang Liu, Shilong Zhang, Zhanghui Kuang, Aojun Zhou, Jing-Hao Xue, Xinjiang Wang, Yimin Chen, Wenming Yang, Qingmin Liao, Wayne Zhang", "author": "Liyang Liu; Shilong Zhang; Zhanghui Kuang; Aojun Zhou; Jing-Hao Xue; Xinjiang Wang; Yimin Chen; Wenming Yang; Qingmin Liao; Wayne Zhang", "abstract": "Network compression has been widely studied since it is able to reduce the memory and computation cost during inference. 
However, previous methods seldom deal with complicated structures like residual connections, group/depth-wise convolution and feature pyramid network, where channels of multiple layers are coupled and need to be pruned simultaneously. In this paper, we present a general channel pruning approach that can be applied to various complicated structures. Particularly, we propose a layer grouping algorithm to find coupled channels automatically. Then we derive a unified metric based on Fisher information to evaluate the importance of a single channel and coupled channels. Moreover, we find that inference speedup on GPUs is more correlated with the reduction of memory rather than FLOPs, and thus we employ the memory reduction of each channel to normalize the importance. Our method can be used to prune any structures including those with coupled channels. We conduct extensive experiments on various backbones, including the classic ResNet and ResNeXt, mobile-friendly MobileNetV2, and the NAS-based RegNet, both on image classification and object detection which is under-explored. Experimental results validate that our method can effectively prune sophisticated networks, boosting inference speed without sacrificing accuracy.", "bibtex": "@InProceedings{pmlr-v139-liu21ab,\n title = \t {Group Fisher Pruning for Practical Network Compression},\n author = {Liu, Liyang and Zhang, Shilong and Kuang, Zhanghui and Zhou, Aojun and Xue, Jing-Hao and Wang, Xinjiang and Chen, Yimin and Yang, Wenming and Liao, Qingmin and Zhang, Wayne},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7021--7032},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21ab/liu21ab.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21ab.html},\n abstract = \t {Network compression has been widely studied since it is able to reduce the memory and computation cost during inference. However, previous methods seldom deal with complicated structures like residual connections, group/depth-wise convolution and feature pyramid network, where channels of multiple layers are coupled and need to be pruned simultaneously. In this paper, we present a general channel pruning approach that can be applied to various complicated structures. Particularly, we propose a layer grouping algorithm to find coupled channels automatically. Then we derive a unified metric based on Fisher information to evaluate the importance of a single channel and coupled channels. Moreover, we find that inference speedup on GPUs is more correlated with the reduction of memory rather than FLOPs, and thus we employ the memory reduction of each channel to normalize the importance. Our method can be used to prune any structures including those with coupled channels. We conduct extensive experiments on various backbones, including the classic ResNet and ResNeXt, mobile-friendly MobileNetV2, and the NAS-based RegNet, both on image classification and object detection which is under-explored. 
Experimental results validate that our method can effectively prune sophisticated networks, boosting inference speed without sacrificing accuracy.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21ab/liu21ab.pdf", "supp": "", "pdf_size": 3012865, "gs_citation": 200, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7436704720048829343&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": ";;;;;;;;;", "aff_domain": ";;;;;;;;;", "email": ";;;;;;;;;", "github": "https://github.com/jshilong/FisherPruning", "project": "", "author_num": 10, "oa": "https://proceedings.mlr.press/v139/liu21ab.html" }, { "title": "Group-Sparse Matrix Factorization for Transfer Learning of Word Embeddings", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8927", "id": "8927", "proceeding": "http://proceedings.mlr.press/v139/xu21l.html", "slides": "/media/icml-2021/Slides/8927.pdf", "author_site": "Kan Xu, Xuanyi Zhao, Hamsa Bastani, Osbert Bastani", "author": "Kan Xu; Xuanyi Zhao; Hamsa Bastani; Osbert Bastani", "abstract": "Sparse regression has recently been applied to enable transfer learning from very limited data. We study an extension of this approach to unsupervised learning\u2014in particular, learning word embeddings from unstructured text corpora using low-rank matrix factorization. Intuitively, when transferring word embeddings to a new domain, we expect that the embeddings change for only a small number of words\u2014e.g., the ones with novel meanings in that domain. We propose a novel group-sparse penalty that exploits this sparsity to perform transfer learning when there is very little text data available in the target domain\u2014e.g., a single article of text. We prove generalization bounds for our algorithm. Furthermore, we empirically evaluate its effectiveness, both in terms of prediction accuracy in downstream tasks as well as in terms of interpretability of the results.", "bibtex": "@InProceedings{pmlr-v139-xu21l,\n title = \t {Group-Sparse Matrix Factorization for Transfer Learning of Word Embeddings},\n author = {Xu, Kan and Zhao, Xuanyi and Bastani, Hamsa and Bastani, Osbert},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11603--11612},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xu21l/xu21l.pdf},\n url = \t {https://proceedings.mlr.press/v139/xu21l.html},\n abstract = \t {Sparse regression has recently been applied to enable transfer learning from very limited data. We study an extension of this approach to unsupervised learning\u2014in particular, learning word embeddings from unstructured text corpora using low-rank matrix factorization. Intuitively, when transferring word embeddings to a new domain, we expect that the embeddings change for only a small number of words\u2014e.g., the ones with novel meanings in that domain. We propose a novel group-sparse penalty that exploits this sparsity to perform transfer learning when there is very little text data available in the target domain\u2014e.g., a single article of text. We prove generalization bounds for our algorithm. 
Furthermore, we empirically evaluate its effectiveness, both in terms of prediction accuracy in downstream tasks as well as in terms of interpretability of the results.}\n}", "pdf": "http://proceedings.mlr.press/v139/xu21l/xu21l.pdf", "supp": "", "pdf_size": 1133877, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1132104059937932692&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "University of Pennsylvania; University of Pennsylvania; University of Pennsylvania; University of Pennsylvania", "aff_domain": "sas.upenn.edu; ; ; ", "email": "sas.upenn.edu; ; ; ", "github": "https://github.com/kanxu526/GroupTLWordEmbedding", "project": "https://arxiv.org/abs/2104.08928", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/xu21l.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Pennsylvania", "aff_unique_dep": "", "aff_unique_url": "https://www.upenn.edu", "aff_unique_abbr": "UPenn", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Guarantees for Tuning the Step Size using a Learning-to-Learn Approach", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9003", "id": "9003", "proceeding": "http://proceedings.mlr.press/v139/wang21ac.html", "slides": "/media/icml-2021/Slides/9003.pdf", "author_site": "Xiang Wang, Shuai Yuan, Chenwei Wu, Rong Ge", "author": "Xiang Wang; Shuai Yuan; Chenwei Wu; Rong Ge", "abstract": "Choosing the right parameters for optimization algorithms is often the key to their success in practice. Solving this problem using a learning-to-learn approach\u2014using meta-gradient descent on a meta-objective based on the trajectory that the optimizer generates\u2014was recently shown to be effective. However, the meta-optimization problem is difficult. In particular, the meta-gradient can often explode/vanish, and the learned optimizer may not have good generalization performance if the meta-objective is not chosen carefully. In this paper we give meta-optimization guarantees for the learning-to-learn approach on a simple problem of tuning the step size for quadratic loss. Our results show that the na\u00efve objective suffers from meta-gradient explosion/vanishing problem. Although there is a way to design the meta-objective so that the meta-gradient remains polynomially bounded, computing the meta-gradient directly using backpropagation leads to numerical issues. We also characterize when it is necessary to compute the meta-objective on a separate validation set to ensure the generalization performance of the learned optimizer. 
Finally, we verify our results empirically and show that a similar phenomenon appears even for more complicated learned optimizers parametrized by neural networks.", "bibtex": "@InProceedings{pmlr-v139-wang21ac,\n title = \t {Guarantees for Tuning the Step Size using a Learning-to-Learn Approach},\n author = {Wang, Xiang and Yuan, Shuai and Wu, Chenwei and Ge, Rong},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10981--10990},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21ac/wang21ac.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21ac.html},\n abstract = \t {Choosing the right parameters for optimization algorithms is often the key to their success in practice. Solving this problem using a learning-to-learn approach\u2014using meta-gradient descent on a meta-objective based on the trajectory that the optimizer generates\u2014was recently shown to be effective. However, the meta-optimization problem is difficult. In particular, the meta-gradient can often explode/vanish, and the learned optimizer may not have good generalization performance if the meta-objective is not chosen carefully. In this paper we give meta-optimization guarantees for the learning-to-learn approach on a simple problem of tuning the step size for quadratic loss. Our results show that the na\u00efve objective suffers from meta-gradient explosion/vanishing problem. Although there is a way to design the meta-objective so that the meta-gradient remains polynomially bounded, computing the meta-gradient directly using backpropagation leads to numerical issues. We also characterize when it is necessary to compute the meta-objective on a separate validation set to ensure the generalization performance of the learned optimizer. 
Finally, we verify our results empirically and show that a similar phenomenon appears even for more complicated learned optimizers parametrized by neural networks.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21ac/wang21ac.pdf", "supp": "", "pdf_size": 380940, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14011148372183922163&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Department of Computer Science, Duke University; Department of Computer Science, Duke University; Department of Computer Science, Duke University; Department of Computer Science, Duke University", "aff_domain": "cs.duke.edu; ; ; ", "email": "cs.duke.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/wang21ac.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Duke University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.duke.edu", "aff_unique_abbr": "Duke", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Guided Exploration with Proximal Policy Optimization using a Single Demonstration", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10255", "id": "10255", "proceeding": "http://proceedings.mlr.press/v139/libardi21a.html", "slides": "", "author_site": "Gabriele Libardi, Gianni De Fabritiis, Sebastian Dittert", "author": "Gabriele Libardi; Gianni De Fabritiis; Sebastian Dittert", "abstract": "Solving sparse reward tasks through exploration is one of the major challenges in deep reinforcement learning, especially in three-dimensional, partially-observable environments. Critically, the algorithm proposed in this article is capable of using a single human demonstration to solve hard-exploration problems. We train an agent on a combination of demonstrations and own experience to solve problems with variable initial conditions and we integrate it with proximal policy optimization (PPO). The agent is also able to increase its performance and to tackle harder problems by replaying its own past trajectories prioritizing them based on the obtained reward and the maximum value of the trajectory. We finally compare variations of this algorithm to different imitation learning algorithms on a set of hard-exploration tasks in the Animal-AI Olympics environment. To the best of our knowledge, learning a task in a three-dimensional environment with comparable difficulty has never been considered before using only one human demonstration.", "bibtex": "@InProceedings{pmlr-v139-libardi21a,\n title = \t {Guided Exploration with Proximal Policy Optimization using a Single Demonstration},\n author = {Libardi, Gabriele and De Fabritiis, Gianni and Dittert, Sebastian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6611--6620},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/libardi21a/libardi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/libardi21a.html},\n abstract = \t {Solving sparse reward tasks through exploration is one of the major challenges in deep reinforcement learning, especially in three-dimensional, partially-observable environments. 
Critically, the algorithm proposed in this article is capable of using a single human demonstration to solve hard-exploration problems. We train an agent on a combination of demonstrations and own experience to solve problems with variable initial conditions and we integrate it with proximal policy optimization (PPO). The agent is also able to increase its performance and to tackle harder problems by replaying its own past trajectories prioritizing them based on the obtained reward and the maximum value of the trajectory. We finally compare variations of this algorithm to different imitation learning algorithms on a set of hard-exploration tasks in the Animal-AI Olympics environment. To the best of our knowledge, learning a task in a three-dimensional environment with comparable difficulty has never been considered before using only one human demonstration.}\n}", "pdf": "http://proceedings.mlr.press/v139/libardi21a/libardi21a.pdf", "supp": "", "pdf_size": 984625, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1058578842192260735&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Computational Science Laboratory, Universitat Pompeu Fabra (UPF)+ICREA; Computational Science Laboratory, Universitat Pompeu Fabra (UPF); Computational Science Laboratory, Universitat Pompeu Fabra (UPF)+ICREA", "aff_domain": "yahoo.it; ;upf.edu", "email": "yahoo.it; ;upf.edu", "github": "https://github.com/compsciencelab/ppo_D", "project": "https://www.youtube.com/playlist?list=PLBeSdcnDP2WFQWLBrLGSkwtitneOelcm-", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/libardi21a.html", "aff_unique_index": "1;1", "aff_unique_norm": ";Instituci\u00f3 Catalana de Recerca i Estudis Avan\u00e7ats", "aff_unique_dep": ";", "aff_unique_url": ";https://www.icrea.cat", "aff_unique_abbr": ";ICREA", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "1;1", "aff_country_unique": ";Spain" }, { "title": "HAWQ-V3: Dyadic Neural Network Quantization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10099", "id": "10099", "proceeding": "http://proceedings.mlr.press/v139/yao21a.html", "slides": "", "author_site": "Zhewei Yao, Zhen Dong, Zhangcheng Zheng, Amir Gholaminejad, Jiali Yu, Eric Tan, Leyuan Wang, Qijing Huang, Yida Wang, Michael Mahoney, EECS Kurt Keutzer", "author": "Zhewei Yao; Zhen Dong; Zhangcheng Zheng; Amir Gholami; Jiali Yu; Eric Tan; Leyuan Wang; Qijing Huang; Yida Wang; Michael Mahoney; Kurt Keutzer", "abstract": "Current low-precision quantization algorithms often have the hidden cost of conversion back and forth from floating point to quantized integer values. This hidden cost limits the latency improvement realized by quantizing Neural Networks. To address this, we present HAWQ-V3, a novel mixed-precision integer-only quantization framework. 
The contributions of HAWQ-V3 are the following: (i) An integer-only inference where the entire computational graph is performed only with integer multiplication, addition, and bit shifting, without any floating point operations or even integer division; (ii) A novel hardware-aware mixed-precision quantization method where the bit-precision is calculated by solving an integer linear programming problem that balances the trade-off between model perturbation and other constraints, e.g., memory footprint and latency; (iii) Direct hardware deployment and open source contribution for 4-bit uniform/mixed-precision quantization in TVM, achieving an average speed up of 1.45x for uniform 4-bit, as compared to uniform 8-bit for ResNet50 on T4 GPUs; and (iv) extensive evaluation of the proposed methods on ResNet18/50 and InceptionV3, for various model compression levels with/without mixed precision. For ResNet50, our INT8 quantization achieves an accuracy of 77.58%, which is 2.68% higher than prior integer-only work, and our mixed-precision INT4/8 quantization can reduce INT8 latency by 23% and still achieve 76.73% accuracy. Our framework and the TVM implementation have been open sourced (HAWQ, 2020).", "bibtex": "@InProceedings{pmlr-v139-yao21a,\n title = \t {HAWQ-V3: Dyadic Neural Network Quantization},\n author = {Yao, Zhewei and Dong, Zhen and Zheng, Zhangcheng and Gholami, Amir and Yu, Jiali and Tan, Eric and Wang, Leyuan and Huang, Qijing and Wang, Yida and Mahoney, Michael and Keutzer, Kurt},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11875--11886},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yao21a/yao21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yao21a.html},\n abstract = \t {Current low-precision quantization algorithms often have the hidden cost of conversion back and forth from floating point to quantized integer values. This hidden cost limits the latency improvement realized by quantizing Neural Networks. To address this, we present HAWQ-V3, a novel mixed-precision integer-only quantization framework. The contributions of HAWQ-V3 are the following: (i) An integer-only inference where the entire computational graph is performed only with integer multiplication, addition, and bit shifting, without any floating point operations or even integer division; (ii) A novel hardware-aware mixed-precision quantization method where the bit-precision is calculated by solving an integer linear programming problem that balances the trade-off between model perturbation and other constraints, e.g., memory footprint and latency; (iii) Direct hardware deployment and open source contribution for 4-bit uniform/mixed-precision quantization in TVM, achieving an average speed up of 1.45x for uniform 4-bit, as compared to uniform 8-bit for ResNet50 on T4 GPUs; and (iv) extensive evaluation of the proposed methods on ResNet18/50 and InceptionV3, for various model compression levels with/without mixed precision. For ResNet50, our INT8 quantization achieves an accuracy of 77.58%, which is 2.68% higher than prior integer-only work, and our mixed-precision INT4/8 quantization can reduce INT8 latency by 23% and still achieve 76.73% accuracy. 
Our framework and the TVM implementation have been open sourced (HAWQ, 2020).}\n}", "pdf": "http://proceedings.mlr.press/v139/yao21a/yao21a.pdf", "supp": "", "pdf_size": 2660835, "gs_citation": 309, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17921583218157904692&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "University of California, Berkeley; University of California, Berkeley; University of California, Berkeley; University of California, Berkeley; Amazon + Shanghai Jiao Tong University; University of California, Berkeley; Shanghai Jiao Tong University; University of California, Berkeley; Shanghai Jiao Tong University; University of California, Berkeley; University of California, Berkeley", "aff_domain": "berkeley.edu;berkeley.edu; ; ; ; ; ; ; ; ; ", "email": "berkeley.edu;berkeley.edu; ; ; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 11, "oa": "https://proceedings.mlr.press/v139/yao21a.html", "aff_unique_index": "0;0;0;0;1+2;0;2;0;2;0;0", "aff_unique_norm": "University of California, Berkeley;Amazon;Shanghai Jiao Tong University", "aff_unique_dep": ";Amazon.com, Inc.;", "aff_unique_url": "https://www.berkeley.edu;https://www.amazon.com;https://www.sjtu.edu.cn", "aff_unique_abbr": "UC Berkeley;Amazon;SJTU", "aff_campus_unique_index": "0;0;0;0;;0;0;0;0", "aff_campus_unique": "Berkeley;", "aff_country_unique_index": "0;0;0;0;0+1;0;1;0;1;0;0", "aff_country_unique": "United States;China" }, { "title": "HEMET: A Homomorphic-Encryption-Friendly Privacy-Preserving Mobile Neural Network Architecture", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10389", "id": "10389", "proceeding": "http://proceedings.mlr.press/v139/lou21a.html", "slides": "", "author_site": "Qian Lou, Lei Jiang", "author": "Qian Lou; Lei Jiang", "abstract": "Recently Homomorphic Encryption (HE) is used to implement Privacy-Preserving Neural Networks (PPNNs) that perform inferences directly on encrypted data without decryption. Prior PPNNs adopt mobile network architectures such as SqueezeNet for smaller computing overhead, but we find na\u00efvely using mobile network architectures for a PPNN does not necessarily achieve shorter inference latency. Despite having less parameters, a mobile network architecture typically introduces more layers and increases the HE multiplicative depth of a PPNN, thereby prolonging its inference latency. In this paper, we propose a \\textbf{HE}-friendly privacy-preserving \\textbf{M}obile neural n\\textbf{ET}work architecture, \\textbf{HEMET}. 
Experimental results show that, compared to state-of-the-art (SOTA) PPNNs, HEMET reduces the inference latency by $59.3\\%\\sim 61.2\\%$, and improves the inference accuracy by $0.4\\% \\sim 0.5\\%$.", "bibtex": "@InProceedings{pmlr-v139-lou21a,\n title = \t {HEMET: A Homomorphic-Encryption-Friendly Privacy-Preserving Mobile Neural Network Architecture},\n author = {Lou, Qian and Jiang, Lei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7102--7110},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lou21a/lou21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lou21a.html},\n abstract = \t {Recently Homomorphic Encryption (HE) is used to implement Privacy-Preserving Neural Networks (PPNNs) that perform inferences directly on encrypted data without decryption. Prior PPNNs adopt mobile network architectures such as SqueezeNet for smaller computing overhead, but we find na\u00efvely using mobile network architectures for a PPNN does not necessarily achieve shorter inference latency. Despite having less parameters, a mobile network architecture typically introduces more layers and increases the HE multiplicative depth of a PPNN, thereby prolonging its inference latency. In this paper, we propose a \\textbf{HE}-friendly privacy-preserving \\textbf{M}obile neural n\\textbf{ET}work architecture, \\textbf{HEMET}. Experimental results show that, compared to state-of-the-art (SOTA) PPNNs, HEMET reduces the inference latency by $59.3\\%\\sim 61.2\\%$, and improves the inference accuracy by $0.4\\% \\sim 0.5\\%$.}\n}", "pdf": "http://proceedings.mlr.press/v139/lou21a/lou21a.pdf", "supp": "", "pdf_size": 803945, "gs_citation": 89, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7740859025793659794&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Indiana University Bloomington; Indiana University Bloomington", "aff_domain": "iu.edu;iu.edu", "email": "iu.edu;iu.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/lou21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Indiana University", "aff_unique_dep": "", "aff_unique_url": "https://www.indiana.edu", "aff_unique_abbr": "IU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Bloomington", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "HardCoRe-NAS: Hard Constrained diffeRentiable Neural Architecture Search", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8443", "id": "8443", "proceeding": "http://proceedings.mlr.press/v139/nayman21a.html", "slides": "", "author_site": "Niv Nayman, Yonathan Aflalo, Asaf Noy, Lihi Zelnik", "author": "Niv Nayman; Yonathan Aflalo; Asaf Noy; Lihi Zelnik", "abstract": "Realistic use of neural networks often requires adhering to multiple constraints on latency, energy and memory among others. A popular approach to find fitting networks is through constrained Neural Architecture Search (NAS), however, previous methods enforce the constraint only softly. Therefore, the resulting networks do not exactly adhere to the resource constraint and their accuracy is harmed. 
In this work we resolve this by introducing Hard Constrained diffeRentiable NAS (HardCoRe-NAS), that is based on an accurate formulation of the expected resource requirement and a scalable search method that satisfies the hard constraint throughout the search. Our experiments show that HardCoRe-NAS generates state-of-the-art architectures, surpassing other NAS methods, while strictly satisfying the hard resource constraints without any tuning required.", "bibtex": "@InProceedings{pmlr-v139-nayman21a,\n title = \t {HardCoRe-NAS: Hard Constrained diffeRentiable Neural Architecture Search},\n author = {Nayman, Niv and Aflalo, Yonathan and Noy, Asaf and Zelnik, Lihi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7979--7990},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nayman21a/nayman21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/nayman21a.html},\n abstract = \t {Realistic use of neural networks often requires adhering to multiple constraints on latency, energy and memory among others. A popular approach to find fitting networks is through constrained Neural Architecture Search (NAS), however, previous methods enforce the constraint only softly. Therefore, the resulting networks do not exactly adhere to the resource constraint and their accuracy is harmed. In this work we resolve this by introducing Hard Constrained diffeRentiable NAS (HardCoRe-NAS), that is based on an accurate formulation of the expected resource requirement and a scalable search method that satisfies the hard constraint throughout the search. Our experiments show that HardCoRe-NAS generates state-of-the-art architectures, surpassing other NAS methods, while strictly satisfying the hard resource constraints without any tuning required.}\n}", "pdf": "http://proceedings.mlr.press/v139/nayman21a/nayman21a.pdf", "supp": "", "pdf_size": 1128203, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12851686551366341896&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Alibaba Group, Tel Aviv, Israel; Alibaba Group, Tel Aviv, Israel; Alibaba Group, Tel Aviv, Israel; Alibaba Group, Tel Aviv, Israel", "aff_domain": "alibaba-inc.com;gmail.com; ; ", "email": "alibaba-inc.com;gmail.com; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/nayman21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Alibaba Group", "aff_unique_dep": "", "aff_unique_url": "https://www.alibaba.com", "aff_unique_abbr": "Alibaba", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Tel Aviv", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Israel" }, { "title": "Heterogeneity for the Win: One-Shot Federated Clustering", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9973", "id": "9973", "proceeding": "http://proceedings.mlr.press/v139/dennis21a.html", "slides": "", "author_site": "Don Kurian Dennis, Tian Li, Virginia Smith", "author": "Don Kurian Dennis; Tian Li; Virginia Smith", "abstract": "In this work, we explore the unique challenges\u2014and opportunities\u2014of unsupervised federated learning (FL). We develop and analyze a one-shot federated clustering scheme, kfed, based on the widely-used Lloyd\u2019s method for $k$-means clustering. 
In contrast to many supervised problems, we show that the issue of statistical heterogeneity in federated networks can in fact benefit our analysis. We analyse kfed under a center separation assumption and compare it to the best known requirements of its centralized counterpart. Our analysis shows that in heterogeneous regimes where the number of clusters per device $(k\u2019)$ is smaller than the total number of clusters over the network $k$, $(k\u2019\\le \\sqrt{k})$, we can use heterogeneity to our advantage\u2014significantly weakening the cluster separation requirements for kfed. From a practical viewpoint, kfed also has many desirable properties: it requires only one round of communication, can run asynchronously, and can handle partial participation or node/network failures. We motivate our analysis with experiments on common FL benchmarks, and highlight the practical utility of one-shot clustering through use-cases in personalized FL and device sampling.", "bibtex": "@InProceedings{pmlr-v139-dennis21a,\n title = \t {Heterogeneity for the Win: One-Shot Federated Clustering},\n author = {Dennis, Don Kurian and Li, Tian and Smith, Virginia},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2611--2620},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/dennis21a/dennis21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/dennis21a.html},\n abstract = \t {In this work, we explore the unique challenges\u2014and opportunities\u2014of unsupervised federated learning (FL). We develop and analyze a one-shot federated clustering scheme, kfed, based on the widely-used Lloyd\u2019s method for $k$-means clustering. In contrast to many supervised problems, we show that the issue of statistical heterogeneity in federated networks can in fact benefit our analysis. We analyse kfed under a center separation assumption and compare it to the best known requirements of its centralized counterpart. Our analysis shows that in heterogeneous regimes where the number of clusters per device $(k\u2019)$ is smaller than the total number of clusters over the network $k$, $(k\u2019\\le \\sqrt{k})$, we can use heterogeneity to our advantage\u2014significantly weakening the cluster separation requirements for kfed. From a practical viewpoint, kfed also has many desirable properties: it requires only one round of communication, can run asynchronously, and can handle partial participation or node/network failures. 
We motivate our analysis with experiments on common FL benchmarks, and highlight the practical utility of one-shot clustering through use-cases in personalized FL and device sampling.}\n}", "pdf": "http://proceedings.mlr.press/v139/dennis21a/dennis21a.pdf", "supp": "", "pdf_size": 2240361, "gs_citation": 199, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14124457958775969601&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Carnegie Mellon University, Pittsburgh, PA, USA; Carnegie Mellon University, Pittsburgh, PA, USA; Carnegie Mellon University, Pittsburgh, PA, USA", "aff_domain": "cmu.edu; ; ", "email": "cmu.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/dennis21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Heterogeneous Risk Minimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9881", "id": "9881", "proceeding": "http://proceedings.mlr.press/v139/liu21h.html", "slides": "/media/icml-2021/Slides/9881.pdf", "author_site": "Jiashuo Liu, Zheyuan Hu, Peng Cui, Bo Li, Zheyan Shen", "author": "Jiashuo Liu; Zheyuan Hu; Peng Cui; Bo Li; Zheyan Shen", "abstract": "Machine learning algorithms with empirical risk minimization usually suffer from poor generalization performance due to the greedy exploitation of correlations among the training data, which are not stable under distributional shifts. Recently, some invariant learning methods for out-of-distribution (OOD) generalization have been proposed by leveraging multiple training environments to find invariant relationships. However, modern datasets are frequently assembled by merging data from multiple sources without explicit source labels. The resultant unobserved heterogeneity renders many invariant learning methods inapplicable. In this paper, we propose Heterogeneous Risk Minimization (HRM) framework to achieve joint learning of latent heterogeneity among the data and invariant relationship, which leads to stable prediction despite distributional shifts. We theoretically characterize the roles of the environment labels in invariant learning and justify our newly proposed HRM framework. Extensive experimental results validate the effectiveness of our HRM framework.", "bibtex": "@InProceedings{pmlr-v139-liu21h,\n title = \t {Heterogeneous Risk Minimization},\n author = {Liu, Jiashuo and Hu, Zheyuan and Cui, Peng and Li, Bo and Shen, Zheyan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6804--6814},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21h/liu21h.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21h.html},\n abstract = \t {Machine learning algorithms with empirical risk minimization usually suffer from poor generalization performance due to the greedy exploitation of correlations among the training data, which are not stable under distributional shifts. 
Recently, some invariant learning methods for out-of-distribution (OOD) generalization have been proposed by leveraging multiple training environments to find invariant relationships. However, modern datasets are frequently assembled by merging data from multiple sources without explicit source labels. The resultant unobserved heterogeneity renders many invariant learning methods inapplicable. In this paper, we propose Heterogeneous Risk Minimization (HRM) framework to achieve joint learning of latent heterogeneity among the data and invariant relationship, which leads to stable prediction despite distributional shifts. We theoretically characterize the roles of the environment labels in invariant learning and justify our newly proposed HRM framework. Extensive experimental results validate the effectiveness of our HRM framework.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21h/liu21h.pdf", "supp": "", "pdf_size": 1678520, "gs_citation": 170, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12299879840182415633&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science and Technology, Tsinghua University, Beijing, China; Department of Computer Science and Technology, Tsinghua University, Beijing, China; Department of Computer Science and Technology, Tsinghua University, Beijing, China; School of Economics and Management, Tsinghua University, Beijing, China; Department of Computer Science and Technology, Tsinghua University, Beijing, China", "aff_domain": "gmail.com;gmail.com;tsinghua.edu.cn;sem.tsinghua.edu.cn;mails.tsinghua.edu.cn", "email": "gmail.com;gmail.com;tsinghua.edu.cn;sem.tsinghua.edu.cn;mails.tsinghua.edu.cn", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/liu21h.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Tsinghua University", "aff_unique_dep": "Department of Computer Science and Technology", "aff_unique_url": "https://www.tsinghua.edu.cn", "aff_unique_abbr": "THU", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Beijing", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "China" }, { "title": "Hierarchical Agglomerative Graph Clustering in Nearly-Linear Time", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9207", "id": "9207", "proceeding": "http://proceedings.mlr.press/v139/dhulipala21a.html", "slides": "", "author_site": "Laxman Dhulipala, David Eisenstat, Jakub \u0141\u0105cki, Vahab Mirrokni, Jessica Shi", "author": "Laxman Dhulipala; David Eisenstat; Jakub \u0141\u0105cki; Vahab Mirrokni; Jessica Shi", "abstract": "We study the widely-used hierarchical agglomerative clustering (HAC) algorithm on edge-weighted graphs. We define an algorithmic framework for hierarchical agglomerative graph clustering that provides the first efficient $\\tilde{O}(m)$ time exact algorithms for classic linkage measures, such as complete- and WPGMA-linkage, as well as other measures. Furthermore, for average-linkage, arguably the most popular variant of HAC, we provide an algorithm that runs in $\\tilde{O}(n\\sqrt{m})$ time. For this variant, this is the first exact algorithm that runs in subquadratic time, as long as $m=n^{2-\\epsilon}$ for some constant $\\epsilon > 0$. We complement this result with a simple $\\epsilon$-close approximation algorithm for average-linkage in our framework that runs in $\\tilde{O}(m)$ time. 
As an application of our algorithms, we consider clustering points in a metric space by first using $k$-NN to generate a graph from the point set, and then running our algorithms on the resulting weighted graph. We validate the performance of our algorithms on publicly available datasets, and show that our approach can speed up clustering of point datasets by a factor of 20.7\u201376.5x.", "bibtex": "@InProceedings{pmlr-v139-dhulipala21a,\n title = \t {Hierarchical Agglomerative Graph Clustering in Nearly-Linear Time},\n author = {Dhulipala, Laxman and Eisenstat, David and {\\L}{\\k{a}}cki, Jakub and Mirrokni, Vahab and Shi, Jessica},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2676--2686},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/dhulipala21a/dhulipala21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/dhulipala21a.html},\n abstract = \t {We study the widely-used hierarchical agglomerative clustering (HAC) algorithm on edge-weighted graphs. We define an algorithmic framework for hierarchical agglomerative graph clustering that provides the first efficient $\\tilde{O}(m)$ time exact algorithms for classic linkage measures, such as complete- and WPGMA-linkage, as well as other measures. Furthermore, for average-linkage, arguably the most popular variant of HAC, we provide an algorithm that runs in $\\tilde{O}(n\\sqrt{m})$ time. For this variant, this is the first exact algorithm that runs in subquadratic time, as long as $m=n^{2-\\epsilon}$ for some constant $\\epsilon > 0$. We complement this result with a simple $\\epsilon$-close approximation algorithm for average-linkage in our framework that runs in $\\tilde{O}(m)$ time. As an application of our algorithms, we consider clustering points in a metric space by first using $k$-NN to generate a graph from the point set, and then running our algorithms on the resulting weighted graph. 
We validate the performance of our algorithms on publicly available datasets, and show that our approach can speed up clustering of point datasets by a factor of 20.7\u201376.5x.}\n}", "pdf": "http://proceedings.mlr.press/v139/dhulipala21a/dhulipala21a.pdf", "supp": "", "pdf_size": 431783, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5297484415833635892&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "MIT CSAIL; Google Research; Google Research; Google Research; MIT CSAIL", "aff_domain": "gmail.com; ; ; ; ", "email": "gmail.com; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/dhulipala21a.html", "aff_unique_index": "0;1;1;1;0", "aff_unique_norm": "Massachusetts Institute of Technology;Google", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory;Google Research", "aff_unique_url": "https://www.csail.mit.edu;https://research.google", "aff_unique_abbr": "MIT CSAIL;Google Research", "aff_campus_unique_index": "0;1;1;1;0", "aff_campus_unique": "Cambridge;Mountain View", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Hierarchical Clustering of Data Streams: Scalable Algorithms and Approximation Guarantees", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10115", "id": "10115", "proceeding": "http://proceedings.mlr.press/v139/rajagopalan21a.html", "slides": "", "author_site": "Anand Rajagopalan, Fabio Vitale, Danny Vainstein, Gui Citovsky, Cecilia Procopiuc, Claudio Gentile", "author": "Anand Rajagopalan; Fabio Vitale; Danny Vainstein; Gui Citovsky; Cecilia M Procopiuc; Claudio Gentile", "abstract": "We investigate the problem of hierarchically clustering data streams containing metric data in R^d. We introduce a desirable invariance property for such algorithms, describe a general family of hyperplane-based methods enjoying this property, and analyze two scalable instances of this general family against recently popularized similarity/dissimilarity-based metrics for hierarchical clustering. We prove a number of new results related to the approximation ratios of these algorithms, improving in various ways over the literature on this subject. Finally, since our algorithms are principled but also very practical, we carry out an experimental comparison on both synthetic and real-world datasets showing competitive results against known baselines.", "bibtex": "@InProceedings{pmlr-v139-rajagopalan21a,\n title = \t {Hierarchical Clustering of Data Streams: Scalable Algorithms and Approximation Guarantees},\n author = {Rajagopalan, Anand and Vitale, Fabio and Vainstein, Danny and Citovsky, Gui and Procopiuc, Cecilia M and Gentile, Claudio},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8799--8809},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/rajagopalan21a/rajagopalan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/rajagopalan21a.html},\n abstract = \t {We investigate the problem of hierarchically clustering data streams containing metric data in R^d. 
We introduce a desirable invariance property for such algorithms, describe a general family of hyperplane-based methods enjoying this property, and analyze two scalable instances of this general family against recently popularized similarity/dissimilarity-based metrics for hierarchical clustering. We prove a number of new results related to the approximation ratios of these algorithms, improving in various ways over the literature on this subject. Finally, since our algorithms are principled but also very practical, we carry out an experimental comparison on both synthetic and real-world datasets showing competitive results against known baselines.}\n}", "pdf": "http://proceedings.mlr.press/v139/rajagopalan21a/rajagopalan21a.pdf", "supp": "", "pdf_size": 489531, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1580833967302645739&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Google Research, NY, USA; Lille University and INRIA Lille, France; Tel-Aviv University, Israel; Google Research, NY, USA; Google Research, NY, USA; Google Research, NY, USA", "aff_domain": "google.com;inria.fr; ; ; ; ", "email": "google.com;inria.fr; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/rajagopalan21a.html", "aff_unique_index": "0;1;2;0;0;0", "aff_unique_norm": "Google;Lille University;Tel Aviv University", "aff_unique_dep": "Google Research;;", "aff_unique_url": "https://research.google;https://www.univ-lille.fr;https://www.tau.ac.il", "aff_unique_abbr": "Google;Lille University;TAU", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "New York;", "aff_country_unique_index": "0;1;2;0;0;0", "aff_country_unique": "United States;France;Israel" }, { "title": "Hierarchical VAEs Know What They Don\u2019t Know", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9665", "id": "9665", "proceeding": "http://proceedings.mlr.press/v139/havtorn21a.html", "slides": "/media/icml-2021/Slides/9665.pdf", "author_site": "Jakob D. Havtorn, Jes Frellsen, S\u00f8ren Hauberg, Lars Maal\u00f8e", "author": "Jakob D. Havtorn; Jes Frellsen; S\u00f8ren Hauberg; Lars Maal\u00f8e", "abstract": "Deep generative models have been demonstrated as state-of-the-art density estimators. Yet, recent work has found that they often assign a higher likelihood to data from outside the training distribution. This seemingly paradoxical behavior has caused concerns over the quality of the attained density estimates. In the context of hierarchical variational autoencoders, we provide evidence to explain this behavior by out-of-distribution data having in-distribution low-level features. We argue that this is both expected and desirable behavior. With this insight in hand, we develop a fast, scalable and fully unsupervised likelihood-ratio score for OOD detection that requires data to be in-distribution across all feature-levels. We benchmark the method on a vast set of data and model combinations and achieve state-of-the-art results on out-of-distribution detection.", "bibtex": "@InProceedings{pmlr-v139-havtorn21a,\n title = \t {Hierarchical VAEs Know What They Don\u2019t Know},\n author = {Havtorn, Jakob D. 
and Frellsen, Jes and Hauberg, S{\\o}ren and Maal{\\o}e, Lars},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4117--4128},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/havtorn21a/havtorn21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/havtorn21a.html},\n abstract = \t {Deep generative models have been demonstrated as state-of-the-art density estimators. Yet, recent work has found that they often assign a higher likelihood to data from outside the training distribution. This seemingly paradoxical behavior has caused concerns over the quality of the attained density estimates. In the context of hierarchical variational autoencoders, we provide evidence to explain this behavior by out-of-distribution data having in-distribution low-level features. We argue that this is both expected and desirable behavior. With this insight in hand, we develop a fast, scalable and fully unsupervised likelihood-ratio score for OOD detection that requires data to be in-distribution across all feature-levels. We benchmark the method on a vast set of data and model combinations and achieve state-of-the-art results on out-of-distribution detection.}\n}", "pdf": "http://proceedings.mlr.press/v139/havtorn21a/havtorn21a.pdf", "supp": "", "pdf_size": 798698, "gs_citation": 87, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18085038587286173153&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Applied Mathematics and Computer Science, Technical University of Denmark, Kongens Lyngby, Denmark + Corti AI, Copenhagen, Denmark; Department of Applied Mathematics and Computer Science, Technical University of Denmark, Kongens Lyngby, Denmark; Department of Applied Mathematics and Computer Science, Technical University of Denmark, Kongens Lyngby, Denmark; Department of Applied Mathematics and Computer Science, Technical University of Denmark, Kongens Lyngby, Denmark + Corti AI, Copenhagen, Denmark", "aff_domain": "corti.ai; ; ;corti.ai", "email": "corti.ai; ; ;corti.ai", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/havtorn21a.html", "aff_unique_index": "0+1;0;0;0+1", "aff_unique_norm": "Technical University of Denmark;Corti AI", "aff_unique_dep": "Department of Applied Mathematics and Computer Science;", "aff_unique_url": "https://www.tek.dk;", "aff_unique_abbr": "DTU;", "aff_campus_unique_index": "0+1;0;0;0+1", "aff_campus_unique": "Kongens Lyngby;Copenhagen", "aff_country_unique_index": "0+0;0;0;0+0", "aff_country_unique": "Denmark" }, { "title": "High Confidence Generalization for Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9323", "id": "9323", "proceeding": "http://proceedings.mlr.press/v139/kostas21a.html", "slides": "", "author_site": "James Kostas, Yash Chandak, Scott Jordan, Georgios Theocharous, Philip Thomas", "author": "James Kostas; Yash Chandak; Scott M Jordan; Georgios Theocharous; Philip Thomas", "abstract": "We present several classes of reinforcement learning algorithms that safely generalize to Markov decision processes (MDPs) not seen during training. Specifically, we study the setting in which some set of MDPs is accessible for training. 
The goal is to generalize safely to MDPs that are sampled from the same distribution, but which may not be in the set accessible for training. For various definitions of safety, our algorithms give probabilistic guarantees that agents can safely generalize to MDPs that are sampled from the same distribution but are not necessarily in the training set. These algorithms are a type of Seldonian algorithm (Thomas et al., 2019), which is a class of machine learning algorithms that return models with probabilistic safety guarantees for user-specified definitions of safety.", "bibtex": "@InProceedings{pmlr-v139-kostas21a,\n title = \t {High Confidence Generalization for Reinforcement Learning},\n author = {Kostas, James and Chandak, Yash and Jordan, Scott M and Theocharous, Georgios and Thomas, Philip},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5764--5773},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kostas21a/kostas21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kostas21a.html},\n abstract = \t {We present several classes of reinforcement learning algorithms that safely generalize to Markov decision processes (MDPs) not seen during training. Specifically, we study the setting in which some set of MDPs is accessible for training. The goal is to generalize safely to MDPs that are sampled from the same distribution, but which may not be in the set accessible for training. For various definitions of safety, our algorithms give probabilistic guarantees that agents can safely generalize to MDPs that are sampled from the same distribution but are not necessarily in the training set. 
These algorithms are a type of Seldonian algorithm (Thomas et al., 2019), which is a class of machine learning algorithms that return models with probabilistic safety guarantees for user-specified definitions of safety.}\n}", "pdf": "http://proceedings.mlr.press/v139/kostas21a/kostas21a.pdf", "supp": "", "pdf_size": 476426, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9240198109092131750&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "College of Information and Computer Sciences, University of Massachusetts, Amherst, MA, USA; College of Information and Computer Sciences, University of Massachusetts, Amherst, MA, USA; College of Information and Computer Sciences, University of Massachusetts, Amherst, MA, USA; Adobe Research; College of Information and Computer Sciences, University of Massachusetts, Amherst, MA, USA", "aff_domain": "umass.edu; ; ; ; ", "email": "umass.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/kostas21a.html", "aff_unique_index": "0;0;0;1;0", "aff_unique_norm": "University of Massachusetts Amherst;Adobe", "aff_unique_dep": "College of Information and Computer Sciences;Adobe Research", "aff_unique_url": "https://www.umass.edu;https://research.adobe.com", "aff_unique_abbr": "UMass Amherst;Adobe", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Amherst;", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "High-Dimensional Gaussian Process Inference with Derivatives", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9649", "id": "9649", "proceeding": "http://proceedings.mlr.press/v139/de-roos21a.html", "slides": "/media/icml-2021/Slides/9649.pdf", "author_site": "Filip de Roos, Alexandra Gessner, Philipp Hennig", "author": "Filip de Roos; Alexandra Gessner; Philipp Hennig", "abstract": "Although it is widely known that Gaussian processes can be conditioned on observations of the gradient, this functionality is of limited use due to the prohibitive computational cost of $\\mathcal{O}(N^3 D^3)$ in data points $N$ and dimension $D$. The dilemma of gradient observations is that a single one of them comes at the same cost as $D$ independent function evaluations, so the latter are often preferred. Careful scrutiny reveals, however, that derivative observations give rise to highly structured kernel Gram matrices for very general classes of kernels (inter alia, stationary kernels). We show that in the \\emph{low-data} regime $N < D$, the Gram matrix can be decomposed in a manner that reduces the cost of inference to $\\mathcal{O}(N^2D + (N^2)^3)$ (i.e.,\u00a0linear in the number of dimensions) and, in special cases, to $\\mathcal{O}(N^2D + N^3)$. This reduction in complexity opens up new use-cases for inference with gradients especially in the high-dimensional regime, where the information-to-cost ratio of gradient observations significantly increases. 
We demonstrate this potential in a variety of tasks relevant for machine learning, such as optimization and Hamiltonian Monte Carlo with predictive gradients.", "bibtex": "@InProceedings{pmlr-v139-de-roos21a,\n title = \t {High-Dimensional Gaussian Process Inference with Derivatives},\n author = {de Roos, Filip and Gessner, Alexandra and Hennig, Philipp},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2535--2545},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/de-roos21a/de-roos21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/de-roos21a.html},\n abstract = \t {Although it is widely known that Gaussian processes can be conditioned on observations of the gradient, this functionality is of limited use due to the prohibitive computational cost of $\\mathcal{O}(N^3 D^3)$ in data points $N$ and dimension $D$. The dilemma of gradient observations is that a single one of them comes at the same cost as $D$ independent function evaluations, so the latter are often preferred. Careful scrutiny reveals, however, that derivative observations give rise to highly structured kernel Gram matrices for very general classes of kernels (inter alia, stationary kernels). We show that in the \\emph{low-data} regime $N < D$, the Gram matrix can be decomposed in a manner that reduces the cost of inference to $\\mathcal{O}(N^2D + (N^2)^3)$ (i.e.,\u00a0linear in the number of dimensions) and, in special cases, to $\\mathcal{O}(N^2D + N^3)$. This reduction in complexity opens up new use-cases for inference with gradients especially in the high-dimensional regime, where the information-to-cost ratio of gradient observations significantly increases. 
We demonstrate this potential in a variety of tasks relevant for machine learning, such as optimization and Hamiltonian Monte Carlo with predictive gradients.}\n}", "pdf": "http://proceedings.mlr.press/v139/de-roos21a/de-roos21a.pdf", "supp": "", "pdf_size": 738603, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11641458762829721702&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science, University of T\u00fcbingen, T\u00fcbingen, Germany+Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany; Department of Computer Science, University of T\u00fcbingen, T\u00fcbingen, Germany+Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany; Department of Computer Science, University of T\u00fcbingen, T\u00fcbingen, Germany+Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany", "aff_domain": "tuebingen.mpg.de; ; ", "email": "tuebingen.mpg.de; ; ", "github": "https://github.com/fidero/gp-derivative", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/de-roos21a.html", "aff_unique_index": "0+1;0+1;0+1", "aff_unique_norm": "University of T\u00fcbingen;Max Planck Institute for Intelligent Systems", "aff_unique_dep": "Department of Computer Science;", "aff_unique_url": "https://www.uni-tuebingen.de;https://www.mpi-is.mpg.de", "aff_unique_abbr": ";MPI-IS", "aff_campus_unique_index": "0+0;0+0;0+0", "aff_campus_unique": "T\u00fcbingen", "aff_country_unique_index": "0+0;0+0;0+0", "aff_country_unique": "Germany" }, { "title": "High-Performance Large-Scale Image Recognition Without Normalization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9957", "id": "9957", "proceeding": "http://proceedings.mlr.press/v139/brock21a.html", "slides": "", "author_site": "Andy Brock, Soham De, Samuel Smith, Karen Simonyan", "author": "Andy Brock; Soham De; Samuel L Smith; Karen Simonyan", "abstract": "Batch normalization is a key component of most image classification models, but it has many undesirable properties stemming from its dependence on the batch size and interactions between examples. Although recent work has succeeded in training deep ResNets without normalization layers, these models do not match the test accuracies of the best batch-normalized networks, and are often unstable for large learning rates or strong data augmentations. In this work, we develop an adaptive gradient clipping technique which overcomes these instabilities, and design a significantly improved class of Normalizer-Free ResNets. Our smaller models match the test accuracy of an EfficientNet-B7 on ImageNet while being up to 8.7x faster to train, and our largest models attain a new state-of-the-art top-1 accuracy of 86.5%. 
In addition, Normalizer-Free models attain significantly better performance than their batch-normalized counterparts when fine-tuning on ImageNet after large-scale pre-training on a dataset of 300 million labeled images, with our best models obtaining an accuracy of 89.2%.", "bibtex": "@InProceedings{pmlr-v139-brock21a,\n title = \t {High-Performance Large-Scale Image Recognition Without Normalization},\n author = {Brock, Andy and De, Soham and Smith, Samuel L and Simonyan, Karen},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1059--1071},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/brock21a/brock21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/brock21a.html},\n abstract = \t {Batch normalization is a key component of most image classification models, but it has many undesirable properties stemming from its dependence on the batch size and interactions between examples. Although recent work has succeeded in training deep ResNets without normalization layers, these models do not match the test accuracies of the best batch-normalized networks, and are often unstable for large learning rates or strong data augmentations. In this work, we develop an adaptive gradient clipping technique which overcomes these instabilities, and design a significantly improved class of Normalizer-Free ResNets. Our smaller models match the test accuracy of an EfficientNet-B7 on ImageNet while being up to 8.7x faster to train, and our largest models attain a new state-of-the-art top-1 accuracy of 86.5%. In addition, Normalizer-Free models attain significantly better performance than their batch-normalized counterparts when fine-tuning on ImageNet after large-scale pre-training on a dataset of 300 million labeled images, with our best models obtaining an accuracy of 89.2%.}\n}", "pdf": "http://proceedings.mlr.press/v139/brock21a/brock21a.pdf", "supp": "", "pdf_size": 336056, "gs_citation": 668, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5658187953666564891&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom", "aff_domain": "deepmind.com; ; ; ", "email": "deepmind.com; ; ; ", "github": "https://github.com/deepmind/deepmind-research/tree/master/nfnets", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/brock21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "High-dimensional Experimental Design and Kernel Bandits", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9741", "id": "9741", "proceeding": "http://proceedings.mlr.press/v139/camilleri21a.html", "slides": "", "author_site": "Romain Camilleri, Kevin Jamieson, Julian Katz-Samuels", "author": "Romain Camilleri; Kevin Jamieson; Julian Katz-Samuels", "abstract": "In recent years methods from optimal linear experimental design have been leveraged to obtain state of the art results for linear bandits. 
A design returned from an objective such as G-optimal design is actually a probability distribution over a pool of potential measurement vectors. Consequently, one nuisance of the approach is the task of converting this continuous probability distribution into a discrete assignment of N measurements. While sophisticated rounding techniques have been proposed, in d dimensions they require N to be at least d, d log(log(d)), or d^2 based on the sub-optimality of the solution. In this paper we are interested in settings where N may be much less than d, such as in experimental design in an RKHS where d may be effectively infinite. In this work, we propose a rounding procedure that frees N of any dependence on the dimension d, while achieving nearly the same performance guarantees of existing rounding procedures. We evaluate the procedure against a baseline that projects the problem to a lower dimensional space and performs rounding there, which requires N to just be at least a notion of the effective dimension. We also leverage our new approach in a new algorithm for kernelized bandits to obtain state of the art results for regret minimization and pure exploration. An advantage of our approach over existing UCB-like approaches is that our kernel bandit algorithms are provably robust to model misspecification.", "bibtex": "@InProceedings{pmlr-v139-camilleri21a,\n title = \t {High-dimensional Experimental Design and Kernel Bandits},\n author = {Camilleri, Romain and Jamieson, Kevin and Katz-Samuels, Julian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1227--1237},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/camilleri21a/camilleri21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/camilleri21a.html},\n abstract = \t {In recent years methods from optimal linear experimental design have been leveraged to obtain state of the art results for linear bandits. A design returned from an objective such as G-optimal design is actually a probability distribution over a pool of potential measurement vectors. Consequently, one nuisance of the approach is the task of converting this continuous probability distribution into a discrete assignment of N measurements. While sophisticated rounding techniques have been proposed, in d dimensions they require N to be at least d, d log(log(d)), or d^2 based on the sub-optimality of the solution. In this paper we are interested in settings where N may be much less than d, such as in experimental design in an RKHS where d may be effectively infinite. In this work, we propose a rounding procedure that frees N of any dependence on the dimension d, while achieving nearly the same performance guarantees of existing rounding procedures. We evaluate the procedure against a baseline that projects the problem to a lower dimensional space and performs rounding there, which requires N to just be at least a notion of the effective dimension. We also leverage our new approach in a new algorithm for kernelized bandits to obtain state of the art results for regret minimization and pure exploration. 
An advantage of our approach over existing UCB-like approaches is that our kernel bandit algorithms are provably robust to model misspecification.}\n}", "pdf": "http://proceedings.mlr.press/v139/camilleri21a/camilleri21a.pdf", "supp": "", "pdf_size": 0, "gs_citation": 57, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15792795576041248400&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/camilleri21a.html" }, { "title": "Homomorphic Sensing: Sparsity and Noise", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8505", "id": "8505", "proceeding": "http://proceedings.mlr.press/v139/peng21a.html", "slides": "", "author_site": "Liangzu Peng, Boshi Wang, Manolis Tsakiris", "author": "Liangzu Peng; Boshi Wang; Manolis Tsakiris", "abstract": "\\emph{Unlabeled sensing} is a recent problem encompassing many data science and engineering applications and typically formulated as solving linear equations whose right-hand side vector has undergone an unknown permutation. It was generalized to the \\emph{homomorphic sensing} problem by replacing the unknown permutation with an unknown linear map from a given finite set of linear maps. In this paper we present tighter and simpler conditions for the homomorphic sensing problem to admit a unique solution. We show that this solution is locally stable under noise, while under a sparsity assumption it remains unique under less demanding conditions. Sparsity in the context of unlabeled sensing leads to the problem of \\textit{unlabeled compressed sensing}, and a consequence of our general theory is the existence under mild conditions of a unique sparsest solution. On the algorithmic level, we solve unlabeled compressed sensing by an iterative algorithm validated by synthetic data experiments. Finally, under the unifying homomorphic sensing framework we connect unlabeled sensing to other important practical problems.", "bibtex": "@InProceedings{pmlr-v139-peng21a,\n title = \t {Homomorphic Sensing: Sparsity and Noise},\n author = {Peng, Liangzu and Wang, Boshi and Tsakiris, Manolis},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8464--8475},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/peng21a/peng21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/peng21a.html},\n abstract = \t {\\emph{Unlabeled sensing} is a recent problem encompassing many data science and engineering applications and typically formulated as solving linear equations whose right-hand side vector has undergone an unknown permutation. It was generalized to the \\emph{homomorphic sensing} problem by replacing the unknown permutation with an unknown linear map from a given finite set of linear maps. In this paper we present tighter and simpler conditions for the homomorphic sensing problem to admit a unique solution. We show that this solution is locally stable under noise, while under a sparsity assumption it remains unique under less demanding conditions. 
Sparsity in the context of unlabeled sensing leads to the problem of \\textit{unlabeled compressed sensing}, and a consequence of our general theory is the existence under mild conditions of a unique sparsest solution. On the algorithmic level, we solve unlabeled compressed sensing by an iterative algorithm validated by synthetic data experiments. Finally, under the unifying homomorphic sensing framework we connect unlabeled sensing to other important practical problems.}\n}", "pdf": "http://proceedings.mlr.press/v139/peng21a/peng21a.pdf", "supp": "", "pdf_size": 351618, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5624760875400532911&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "School of Information Science and Technology, ShanghaiTech University, Shanghai, China; School of Information Science and Technology, ShanghaiTech University, Shanghai, China; School of Information Science and Technology, ShanghaiTech University, Shanghai, China", "aff_domain": "shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn", "email": "shanghaitech.edu.cn;shanghaitech.edu.cn;shanghaitech.edu.cn", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/peng21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "ShanghaiTech University", "aff_unique_dep": "School of Information Science and Technology", "aff_unique_url": "https://www.shanghaitech.edu.cn", "aff_unique_abbr": "ShanghaiTech", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Shanghai", "aff_country_unique_index": "0;0;0", "aff_country_unique": "China" }, { "title": "HoroPCA: Hyperbolic Dimensionality Reduction via Horospherical Projections", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10723", "id": "10723", "proceeding": "http://proceedings.mlr.press/v139/chami21a.html", "slides": "", "author_site": "Ines Chami, Albert Gu, Dat P Nguyen, Christopher Re", "author": "Ines Chami; Albert Gu; Dat P Nguyen; Christopher Re", "abstract": "This paper studies Principal Component Analysis (PCA) for data lying in hyperbolic spaces. Given directions, PCA relies on: (1) a parameterization of subspaces spanned by these directions, (2) a method of projection onto subspaces that preserves information in these directions, and (3) an objective to optimize, namely the variance explained by projections. We generalize each of these concepts to the hyperbolic space and propose HoroPCA, a method for hyperbolic dimensionality reduction. By focusing on the core problem of extracting principal directions, HoroPCA theoretically better preserves information in the original data such as distances, compared to previous generalizations of PCA. Empirically, we validate that HoroPCA outperforms existing dimensionality reduction methods, significantly reducing error in distance preservation. As a data whitening method, it improves downstream classification by up to 3.9% compared to methods that don\u2019t use whitening. 
Finally, we show that HoroPCA can be used to visualize hyperbolic data in two dimensions.", "bibtex": "@InProceedings{pmlr-v139-chami21a,\n title = \t {HoroPCA: Hyperbolic Dimensionality Reduction via Horospherical Projections},\n author = {Chami, Ines and Gu, Albert and Nguyen, Dat P and Re, Christopher},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1419--1429},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chami21a/chami21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/chami21a.html},\n abstract = \t {This paper studies Principal Component Analysis (PCA) for data lying in hyperbolic spaces. Given directions, PCA relies on: (1) a parameterization of subspaces spanned by these directions, (2) a method of projection onto subspaces that preserves information in these directions, and (3) an objective to optimize, namely the variance explained by projections. We generalize each of these concepts to the hyperbolic space and propose HoroPCA, a method for hyperbolic dimensionality reduction. By focusing on the core problem of extracting principal directions, HoroPCA theoretically better preserves information in the original data such as distances, compared to previous generalizations of PCA. Empirically, we validate that HoroPCA outperforms existing dimensionality reduction methods, significantly reducing error in distance preservation. As a data whitening method, it improves downstream classification by up to 3.9% compared to methods that don\u2019t use whitening. Finally, we show that HoroPCA can be used to visualize hyperbolic data in two dimensions.}\n}", "pdf": "http://proceedings.mlr.press/v139/chami21a/chami21a.pdf", "supp": "", "pdf_size": 634002, "gs_citation": 51, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=659315534474715116&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Stanford University; Stanford University; Stanford University; Stanford University", "aff_domain": "cs.stanford.edu; ; ; ", "email": "cs.stanford.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/chami21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Householder Sketch for Accurate and Accelerated Least-Mean-Squares Solvers", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9399", "id": "9399", "proceeding": "http://proceedings.mlr.press/v139/dass21a.html", "slides": "/media/icml-2021/Slides/9399.pdf", "author_site": "Jyotikrishna Dass, Rabi Mahapatra", "author": "Jyotikrishna Dass; Rabi Mahapatra", "abstract": "Least-Mean-Squares (\\textsc{LMS}) solvers comprise a class of fundamental optimization problems such as linear regression, and regularized regressions such as Ridge, LASSO, and Elastic-Net. Data summarization techniques for big data generate summaries called coresets and sketches to speed up model learning under streaming and distributed settings. 
For example, \\citep{nips2019} design a fast and accurate Caratheodory set on input data to boost the performance of existing \\textsc{LMS} solvers. In retrospect, we explore classical Householder transformation as a candidate for sketching and accurately solving LMS problems. We find it to be a simpler, memory-efficient, and faster alternative that always existed to the above strong baseline. We also present a scalable algorithm based on the construction of distributed Householder sketches to solve \\textsc{LMS} problem across multiple worker nodes. We perform thorough empirical analysis with large synthetic and real datasets to evaluate the performance of Householder sketch and compare with \\citep{nips2019}. Our results show Householder sketch speeds up existing \\textsc{LMS} solvers in the scikit-learn library up to $100$x-$400$x. Also, it is $10$x-$100$x faster than the above baseline with similar numerical stability. The distributed algorithm demonstrates linear scalability with a near-negligible communication overhead.", "bibtex": "@InProceedings{pmlr-v139-dass21a,\n title = \t {Householder Sketch for Accurate and Accelerated Least-Mean-Squares Solvers},\n author = {Dass, Jyotikrishna and Mahapatra, Rabi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2467--2477},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/dass21a/dass21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/dass21a.html},\n abstract = \t {Least-Mean-Squares (\\textsc{LMS}) solvers comprise a class of fundamental optimization problems such as linear regression, and regularized regressions such as Ridge, LASSO, and Elastic-Net. Data summarization techniques for big data generate summaries called coresets and sketches to speed up model learning under streaming and distributed settings. For example, \\citep{nips2019} design a fast and accurate Caratheodory set on input data to boost the performance of existing \\textsc{LMS} solvers. In retrospect, we explore classical Householder transformation as a candidate for sketching and accurately solving LMS problems. We find it to be a simpler, memory-efficient, and faster alternative that always existed to the above strong baseline. We also present a scalable algorithm based on the construction of distributed Householder sketches to solve \\textsc{LMS} problem across multiple worker nodes. We perform thorough empirical analysis with large synthetic and real datasets to evaluate the performance of Householder sketch and compare with \\citep{nips2019}. Our results show Householder sketch speeds up existing \\textsc{LMS} solvers in the scikit-learn library up to $100$x-$400$x. Also, it is $10$x-$100$x faster than the above baseline with similar numerical stability. 
The distributed algorithm demonstrates linear scalability with a near-negligible communication overhead.}\n}", "pdf": "http://proceedings.mlr.press/v139/dass21a/dass21a.pdf", "supp": "", "pdf_size": 1097269, "gs_citation": 2, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3474778902415415162&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science and Engineering, Texas A&M University, College Station, TX, USA; Department of Computer Science and Engineering, Texas A&M University, College Station, TX, USA", "aff_domain": "tamu.edu; ", "email": "tamu.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/dass21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Texas A&M University", "aff_unique_dep": "Department of Computer Science and Engineering", "aff_unique_url": "https://www.tamu.edu", "aff_unique_abbr": "TAMU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "College Station", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "How Do Adam and Training Strategies Help BNNs Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8613", "id": "8613", "proceeding": "http://proceedings.mlr.press/v139/liu21t.html", "slides": "/media/icml-2021/Slides/8613.pdf", "author_site": "Zechun Liu, Zhiqiang Shen, Shichao Li, Koen Helwegen, Dong Huang, Kwang-Ting Cheng", "author": "Zechun Liu; Zhiqiang Shen; Shichao Li; Koen Helwegen; Dong Huang; Kwang-Ting Cheng", "abstract": "The best performing Binary Neural Networks (BNNs) are usually attained using Adam optimization and its multi-step training variants. However, to the best of our knowledge, few studies explore the fundamental reasons why Adam is superior to other optimizers like SGD for BNN optimization or provide analytical explanations that support specific training strategies. To address this, in this paper we first investigate the trajectories of gradients and weights in BNNs during the training process. We show the regularization effect of second-order momentum in Adam is crucial to revitalize the weights that are dead due to the activation saturation in BNNs. We find that Adam, through its adaptive learning rate strategy, is better equipped to handle the rugged loss surface of BNNs and reaches a better optimum with higher generalization ability. Furthermore, we inspect the intriguing role of the real-valued weights in binary networks, and reveal the effect of weight decay on the stability and sluggishness of BNN optimization. Through extensive experiments and analysis, we derive a simple training scheme, building on existing Adam-based optimization, which achieves 70.5% top-1 accuracy on the ImageNet dataset using the same architecture as the state-of-the-art ReActNet while achieving 1.1% higher accuracy. 
Code and models are available at https://github.com/liuzechun/AdamBNN.", "bibtex": "@InProceedings{pmlr-v139-liu21t,\n title = \t {How Do Adam and Training Strategies Help BNNs Optimization},\n author = {Liu, Zechun and Shen, Zhiqiang and Li, Shichao and Helwegen, Koen and Huang, Dong and Cheng, Kwang-Ting},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6936--6946},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21t/liu21t.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21t.html},\n abstract = \t {The best performing Binary Neural Networks (BNNs) are usually attained using Adam optimization and its multi-step training variants. However, to the best of our knowledge, few studies explore the fundamental reasons why Adam is superior to other optimizers like SGD for BNN optimization or provide analytical explanations that support specific training strategies. To address this, in this paper we first investigate the trajectories of gradients and weights in BNNs during the training process. We show the regularization effect of second-order momentum in Adam is crucial to revitalize the weights that are dead due to the activation saturation in BNNs. We find that Adam, through its adaptive learning rate strategy, is better equipped to handle the rugged loss surface of BNNs and reaches a better optimum with higher generalization ability. Furthermore, we inspect the intriguing role of the real-valued weights in binary networks, and reveal the effect of weight decay on the stability and sluggishness of BNN optimization. Through extensive experiments and analysis, we derive a simple training scheme, building on existing Adam-based optimization, which achieves 70.5% top-1 accuracy on the ImageNet dataset using the same architecture as the state-of-the-art ReActNet while achieving 1.1% higher accuracy. Code and models are available at https://github.com/liuzechun/AdamBNN.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21t/liu21t.pdf", "supp": "", "pdf_size": 3467975, "gs_citation": 118, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5036390425346161514&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Hong Kong University of Science and Technology+Plumerai; Carnegie Mellon University; Hong Kong University of Science and Technology; Plumerai; Carnegie Mellon University; Hong Kong University of Science and Technology", "aff_domain": "connect.ust.hk;andrew.cmu.edu; ; ; ; ", "email": "connect.ust.hk;andrew.cmu.edu; ; ; ; ", "github": "https://github.com/liuzechun/AdamBNN", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/liu21t.html", "aff_unique_index": "0+1;2;0;1;2;0", "aff_unique_norm": "Hong Kong University of Science and Technology;Plumerai;Carnegie Mellon University", "aff_unique_dep": ";;", "aff_unique_url": "https://www.ust.hk;https://www.plumerai.com;https://www.cmu.edu", "aff_unique_abbr": "HKUST;Plumerai;CMU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Hong Kong SAR;", "aff_country_unique_index": "0+1;1;0;1;1;0", "aff_country_unique": "China;United States" }, { "title": "How Does Loss Function Affect Generalization Performance of Deep Learning? 
Application to Human Age Estimation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9895", "id": "9895", "proceeding": "http://proceedings.mlr.press/v139/akbari21a.html", "slides": "/media/icml-2021/Slides/9895.pdf", "author_site": "Ali Akbari, Muhammad Awais, Manijeh Bashar, Josef Kittler", "author": "Ali Akbari; Muhammad Awais; Manijeh Bashar; Josef Kittler", "abstract": "Good generalization performance across a wide variety of domains caused by many external and internal factors is the fundamental goal of any machine learning algorithm. This paper theoretically proves that the choice of loss function matters for improving the generalization performance of deep learning-based systems. By deriving the generalization error bound for deep neural models trained by stochastic gradient descent, we pinpoint the characteristics of the loss function that is linked to the generalization error and can therefore be used for guiding the loss function selection process. In summary, our main statement in this paper is: choose a stable loss function, generalize better. Focusing on human age estimation from the face which is a challenging topic in computer vision, we then propose a novel loss function for this learning problem. We theoretically prove that the proposed loss function achieves stronger stability, and consequently a tighter generalization error bound, compared to the other common loss functions for this problem. We have supported our findings theoretically, and demonstrated the merits of the guidance process experimentally, achieving significant improvements.", "bibtex": "@InProceedings{pmlr-v139-akbari21a,\n title = \t {How Does Loss Function Affect Generalization Performance of Deep Learning? Application to Human Age Estimation},\n author = {Akbari, Ali and Awais, Muhammad and Bashar, Manijeh and Kittler, Josef},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {141--151},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/akbari21a/akbari21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/akbari21a.html},\n abstract = \t {Good generalization performance across a wide variety of domains caused by many external and internal factors is the fundamental goal of any machine learning algorithm. This paper theoretically proves that the choice of loss function matters for improving the generalization performance of deep learning-based systems. By deriving the generalization error bound for deep neural models trained by stochastic gradient descent, we pinpoint the characteristics of the loss function that is linked to the generalization error and can therefore be used for guiding the loss function selection process. In summary, our main statement in this paper is: choose a stable loss function, generalize better. Focusing on human age estimation from the face which is a challenging topic in computer vision, we then propose a novel loss function for this learning problem. We theoretically prove that the proposed loss function achieves stronger stability, and consequently a tighter generalization error bound, compared to the other common loss functions for this problem. 
We have supported our findings theoretically, and demonstrated the merits of the guidance process experimentally, achieving significant improvements.}\n}", "pdf": "http://proceedings.mlr.press/v139/akbari21a/akbari21a.pdf", "supp": "", "pdf_size": 519338, "gs_citation": 51, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=881508230796818946&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Centre for Vision, Speech and Signal Processing (CVSSP), University of Surrey, Guildford, UK; Centre for Vision, Speech and Signal Processing (CVSSP), University of Surrey, Guildford, UK; Institute for Communication Systems (ICS), University of Surrey, Guildford, UK; Centre for Vision, Speech and Signal Processing (CVSSP), University of Surrey, Guildford, UK", "aff_domain": "surrey.ac.uk; ; ; ", "email": "surrey.ac.uk; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/akbari21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Surrey", "aff_unique_dep": "Centre for Vision, Speech and Signal Processing (CVSSP)", "aff_unique_url": "https://www.surrey.ac.uk", "aff_unique_abbr": "Surrey", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Guildford", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "How Framelets Enhance Graph Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8465", "id": "8465", "proceeding": "http://proceedings.mlr.press/v139/zheng21c.html", "slides": "", "author_site": "Xuebin Zheng, Bingxin Zhou, Junbin Gao, Yuguang Wang, Pietro Li\u00f3, Ming Li, Guido Montufar", "author": "Xuebin Zheng; Bingxin Zhou; Junbin Gao; Yuguang Wang; Pietro Li\u00f3; Ming Li; Guido Montufar", "abstract": "This paper presents a new approach for assembling graph neural networks based on framelet transforms. The latter provides a multi-scale representation for graph-structured data. We decompose an input graph into low-pass and high-pass frequencies coefficients for network training, which then defines a framelet-based graph convolution. The framelet decomposition naturally induces a graph pooling strategy by aggregating the graph feature into low-pass and high-pass spectra, which considers both the feature values and geometry of the graph data and conserves the total information. The graph neural networks with the proposed framelet convolution and pooling achieve state-of-the-art performance in many node and graph prediction tasks. Moreover, we propose shrinkage as a new activation for the framelet convolution, which thresholds high-frequency information at different scales. 
Compared to ReLU, shrinkage activation improves model performance on denoising and signal compression: noises in both node and structure can be significantly reduced by accurately cutting off the high-pass coefficients from framelet decomposition, and the signal can be compressed to less than half its original size with well-preserved prediction performance.", "bibtex": "@InProceedings{pmlr-v139-zheng21c,\n title = \t {How Framelets Enhance Graph Neural Networks},\n author = {Zheng, Xuebin and Zhou, Bingxin and Gao, Junbin and Wang, Yuguang and Li{\\'o}, Pietro and Li, Ming and Montufar, Guido},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12761--12771},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zheng21c/zheng21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/zheng21c.html},\n abstract = \t {This paper presents a new approach for assembling graph neural networks based on framelet transforms. The latter provides a multi-scale representation for graph-structured data. We decompose an input graph into low-pass and high-pass frequencies coefficients for network training, which then defines a framelet-based graph convolution. The framelet decomposition naturally induces a graph pooling strategy by aggregating the graph feature into low-pass and high-pass spectra, which considers both the feature values and geometry of the graph data and conserves the total information. The graph neural networks with the proposed framelet convolution and pooling achieve state-of-the-art performance in many node and graph prediction tasks. Moreover, we propose shrinkage as a new activation for the framelet convolution, which thresholds high-frequency information at different scales. 
Compared to ReLU, shrinkage activation improves model performance on denoising and signal compression: noises in both node and structure can be significantly reduced by accurately cutting off the high-pass coefficients from framelet decomposition, and the signal can be compressed to less than half its original size with well-preserved prediction performance.}\n}", "pdf": "http://proceedings.mlr.press/v139/zheng21c/zheng21c.pdf", "supp": "", "pdf_size": 8668761, "gs_citation": 84, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13922049936410780570&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "The University of Sydney Business School, The University of Sydney, Camperdown, NSW 2006, Australia; The University of Sydney Business School, The University of Sydney, Camperdown, NSW 2006, Australia; The University of Sydney Business School, The University of Sydney, Camperdown, NSW 2006, Australia; Max Planck Institute for Mathematics in the Sciences, Leipzig, Germany+Institute of Natural Sciences and School of Mathematical Sciences, Shanghai Jiao Tong University, China+School of Mathematics and Statistics, The University of New South Wales, Sydney, Australia; Department of Computer Science and Technology, University of Cambridge, Cambridge, United Kingdom; Key Laboratory of Intelligent Education Technology and Application of Zhejiang Province, Zhejiang Normal University, Jinhua, China; Department of Mathematics and Department of Statistics, University of California, Los Angeles, United States+Max Planck Institute for Mathematics in the Sciences, Leipzig, Germany", "aff_domain": "sydney.edu.au;uni.sydney.edu.au;sydney.edu.au;mis.mpg.de;cl.cam.ac.uk;zjnu.edu.cn;math.ucla.edu", "email": "sydney.edu.au;uni.sydney.edu.au;sydney.edu.au;mis.mpg.de;cl.cam.ac.uk;zjnu.edu.cn;math.ucla.edu", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/zheng21c.html", "aff_unique_index": "0;0;0;1+2+3;4;5;6+1", "aff_unique_norm": "University of Sydney;Max Planck Institute for Mathematics in the Sciences;Shanghai Jiao Tong University;University of New South Wales;University of Cambridge;Zhejiang Normal University;University of California, Los Angeles", "aff_unique_dep": "Business School;Mathematics;Institute of Natural Sciences, School of Mathematical Sciences;School of Mathematics and Statistics;Department of Computer Science and Technology;Key Laboratory of Intelligent Education Technology and Application;Department of Mathematics", "aff_unique_url": "https://www.sydney.edu.au;https://www.mis.mpg.de;https://www.sjtu.edu.cn;https://www.unsw.edu.au;https://www.cam.ac.uk;http://www.zjnu.edu.cn;https://www.ucla.edu", "aff_unique_abbr": "USYD;MPI MIS;SJTU;UNSW;Cambridge;ZJNU;UCLA", "aff_campus_unique_index": "0;0;0;1+3;4;5;6+1", "aff_campus_unique": "Camperdown;Leipzig;;Sydney;Cambridge;Jinhua;Los Angeles", "aff_country_unique_index": "0;0;0;1+2+0;3;2;4+1", "aff_country_unique": "Australia;Germany;China;United Kingdom;United States" }, { "title": "How Important is the Train-Validation Split in Meta-Learning?", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9855", "id": "9855", "proceeding": "http://proceedings.mlr.press/v139/bai21a.html", "slides": "", "author_site": "Yu Bai, Minshuo Chen, Pan Zhou, Tuo Zhao, Jason Lee, Sham Kakade, Huan Wang, Caiming Xiong", "author": "Yu Bai; Minshuo Chen; Pan Zhou; Tuo Zhao; Jason Lee; Sham Kakade; Huan Wang; Caiming Xiong", "abstract": "Meta-learning aims to perform fast adaptation 
on a new task through learning a \u201cprior\u201d from multiple existing tasks. A common practice in meta-learning is to perform a train-validation split (\\emph{train-val method}) where the prior adapts to the task on one split of the data, and the resulting predictor is evaluated on another split. Despite its prevalence, the importance of the train-validation split is not well understood either in theory or in practice, particularly in comparison to the more direct \\emph{train-train method}, which uses all the per-task data for both training and evaluation. We provide a detailed theoretical study on whether and when the train-validation split is helpful in the linear centroid meta-learning problem. In the agnostic case, we show that the expected loss of the train-val method is minimized at the optimal prior for meta testing, and this is not the case for the train-train method in general without structural assumptions on the data. In contrast, in the realizable case where the data are generated from linear models, we show that both the train-val and train-train losses are minimized at the optimal prior in expectation. Further, perhaps surprisingly, our main result shows that the train-train method achieves a \\emph{strictly better} excess loss in this realizable case, even when the regularization parameter and split ratio are optimally tuned for both methods. Our results highlight that sample splitting may not always be preferable, especially when the data is realizable by the model. We validate our theories by experimentally showing that the train-train method can indeed outperform the train-val method, on both simulations and real meta-learning tasks.", "bibtex": "@InProceedings{pmlr-v139-bai21a,\n title = \t {How Important is the Train-Validation Split in Meta-Learning?},\n author = {Bai, Yu and Chen, Minshuo and Zhou, Pan and Zhao, Tuo and Lee, Jason and Kakade, Sham and Wang, Huan and Xiong, Caiming},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {543--553},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bai21a/bai21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bai21a.html},\n abstract = \t {Meta-learning aims to perform fast adaptation on a new task through learning a \u201cprior\u201d from multiple existing tasks. A common practice in meta-learning is to perform a train-validation split (\\emph{train-val method}) where the prior adapts to the task on one split of the data, and the resulting predictor is evaluated on another split. Despite its prevalence, the importance of the train-validation split is not well understood either in theory or in practice, particularly in comparison to the more direct \\emph{train-train method}, which uses all the per-task data for both training and evaluation. We provide a detailed theoretical study on whether and when the train-validation split is helpful in the linear centroid meta-learning problem. In the agnostic case, we show that the expected loss of the train-val method is minimized at the optimal prior for meta testing, and this is not the case for the train-train method in general without structural assumptions on the data. 
In contrast, in the realizable case where the data are generated from linear models, we show that both the train-val and train-train losses are minimized at the optimal prior in expectation. Further, perhaps surprisingly, our main result shows that the train-train method achieves a \\emph{strictly better} excess loss in this realizable case, even when the regularization parameter and split ratio are optimally tuned for both methods. Our results highlight that sample splitting may not always be preferable, especially when the data is realizable by the model. We validate our theories by experimentally showing that the train-train method can indeed outperform the train-val method, on both simulations and real meta-learning tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/bai21a/bai21a.pdf", "supp": "", "pdf_size": 799574, "gs_citation": 92, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9626198558485044892&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Salesforce Research; Georgia Tech; Salesforce Research; Georgia Tech; Princeton University; University of Washington; Salesforce Research; Salesforce Research", "aff_domain": "salesforce.com; ; ; ; ; ; ; ", "email": "salesforce.com; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/bai21a.html", "aff_unique_index": "0;1;0;1;2;3;0;0", "aff_unique_norm": "Salesforce;Georgia Institute of Technology;Princeton University;University of Washington", "aff_unique_dep": "Salesforce Research;;;", "aff_unique_url": "https://research.salesforce.com;https://www.gatech.edu;https://www.princeton.edu;https://www.washington.edu", "aff_unique_abbr": "Salesforce;Georgia Tech;Princeton;UW", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "How and Why to Use Experimental Data to Evaluate Methods for Observational Causal Inference", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9159", "id": "9159", "proceeding": "http://proceedings.mlr.press/v139/gentzel21a.html", "slides": "/media/icml-2021/Slides/9159.pdf", "author_site": "Amanda Gentzel, Purva Pruthi, David Jensen", "author": "Amanda M Gentzel; Purva Pruthi; David Jensen", "abstract": "Methods that infer causal dependence from observational data are central to many areas of science, including medicine, economics, and the social sciences. A variety of theoretical properties of these methods have been proven, but empirical evaluation remains a challenge, largely due to the lack of observational data sets for which treatment effect is known. We describe and analyze observational sampling from randomized controlled trials (OSRCT), a method for evaluating causal inference methods using data from randomized controlled trials (RCTs). This method can be used to create constructed observational data sets with corresponding unbiased estimates of treatment effect, substantially increasing the number of data sets available for evaluating causal inference methods. We show that, in expectation, OSRCT creates data sets that are equivalent to those produced by randomly sampling from empirical data sets in which all potential outcomes are available. We then perform a large-scale evaluation of seven causal inference methods over 37 data sets, drawn from RCTs, as well as simulators, real-world computational systems, and observational data sets augmented with a synthetic response variable. 
We find notable performance differences when comparing across data from different sources, demonstrating the importance of using data from a variety of sources when evaluating any causal inference method.", "bibtex": "@InProceedings{pmlr-v139-gentzel21a,\n title = \t {How and Why to Use Experimental Data to Evaluate Methods for Observational Causal Inference},\n author = {Gentzel, Amanda M and Pruthi, Purva and Jensen, David},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3660--3671},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gentzel21a/gentzel21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/gentzel21a.html},\n abstract = \t {Methods that infer causal dependence from observational data are central to many areas of science, including medicine, economics, and the social sciences. A variety of theoretical properties of these methods have been proven, but empirical evaluation remains a challenge, largely due to the lack of observational data sets for which treatment effect is known. We describe and analyze observational sampling from randomized controlled trials (OSRCT), a method for evaluating causal inference methods using data from randomized controlled trials (RCTs). This method can be used to create constructed observational data sets with corresponding unbiased estimates of treatment effect, substantially increasing the number of data sets available for evaluating causal inference methods. We show that, in expectation, OSRCT creates data sets that are equivalent to those produced by randomly sampling from empirical data sets in which all potential outcomes are available. We then perform a large-scale evaluation of seven causal inference methods over 37 data sets, drawn from RCTs, as well as simulators, real-world computational systems, and observational data sets augmented with a synthetic response variable. 
We find notable performance differences when comparing across data from different sources, demonstrating the importance of using data from a variety of sources when evaluating any causal inference method.}\n}", "pdf": "http://proceedings.mlr.press/v139/gentzel21a/gentzel21a.pdf", "supp": "", "pdf_size": 2208566, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12456468278973351653&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "College of Information and Computer Sciences, University of Massachusetts, Amherst, United States + Leidos, Reston, Virginia, United States; College of Information and Computer Sciences, University of Massachusetts, Amherst, United States; College of Information and Computer Sciences, University of Massachusetts, Amherst, United States", "aff_domain": "leidos.com; ; ", "email": "leidos.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/gentzel21a.html", "aff_unique_index": "0+1;0;0", "aff_unique_norm": "University of Massachusetts Amherst;Leidos", "aff_unique_dep": "College of Information and Computer Sciences;", "aff_unique_url": "https://www.umass.edu;https://www.leidos.com", "aff_unique_abbr": "UMass Amherst;", "aff_campus_unique_index": "0+1;0;0", "aff_campus_unique": "Amherst;Reston", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "United States" }, { "title": "How could Neural Networks understand Programs?", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9087", "id": "9087", "proceeding": "http://proceedings.mlr.press/v139/peng21b.html", "slides": "", "author_site": "Dinglan Peng, Shuxin Zheng, Yatao Li, Guolin Ke, Di He, Tie-Yan Liu", "author": "Dinglan Peng; Shuxin Zheng; Yatao Li; Guolin Ke; Di He; Tie-Yan Liu", "abstract": "Semantic understanding of programs is a fundamental problem for programming language processing (PLP). Recent works that learn representations of code based on pre-training techniques in NLP have pushed the frontiers in this direction. However, the semantics of PL and NL have essential differences. These being ignored, we believe it is difficult to build a model to better understand programs, by either directly applying off-the-shelf NLP pre-training techniques to the source code, or adding features to the model by the heuristic. In fact, the semantics of a program can be rigorously defined by formal semantics in PL theory. For example, the operational semantics, describes the meaning of a valid program as updating the environment (i.e., the memory address-value function) through fundamental operations, such as memory I/O and conditional branching. Inspired by this, we propose a novel program semantics learning paradigm, that the model should learn from information composed of (1) the representations which align well with the fundamental operations in operational semantics, and (2) the information of environment transition, which is indispensable for program understanding. To validate our proposal, we present a hierarchical Transformer-based pre-training model called OSCAR to better facilitate the understanding of programs. OSCAR learns from intermediate representation (IR) and an encoded representation derived from static analysis, which are used for representing the fundamental operations and approximating the environment transitions respectively. OSCAR empirically shows the outstanding capability of program semantics understanding on many practical software engineering tasks. 
Code and models are released at: \\url{https://github.com/pdlan/OSCAR}.", "bibtex": "@InProceedings{pmlr-v139-peng21b,\n title = \t {How could Neural Networks understand Programs?},\n author = {Peng, Dinglan and Zheng, Shuxin and Li, Yatao and Ke, Guolin and He, Di and Liu, Tie-Yan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8476--8486},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/peng21b/peng21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/peng21b.html},\n abstract = \t {Semantic understanding of programs is a fundamental problem for programming language processing (PLP). Recent works that learn representations of code based on pre-training techniques in NLP have pushed the frontiers in this direction. However, the semantics of PL and NL have essential differences. These being ignored, we believe it is difficult to build a model to better understand programs, by either directly applying off-the-shelf NLP pre-training techniques to the source code, or adding features to the model by the heuristic. In fact, the semantics of a program can be rigorously defined by formal semantics in PL theory. For example, the operational semantics, describes the meaning of a valid program as updating the environment (i.e., the memory address-value function) through fundamental operations, such as memory I/O and conditional branching. Inspired by this, we propose a novel program semantics learning paradigm, that the model should learn from information composed of (1) the representations which align well with the fundamental operations in operational semantics, and (2) the information of environment transition, which is indispensable for program understanding. To validate our proposal, we present a hierarchical Transformer-based pre-training model called OSCAR to better facilitate the understanding of programs. OSCAR learns from intermediate representation (IR) and an encoded representation derived from static analysis, which are used for representing the fundamental operations and approximating the environment transitions respectively. OSCAR empirically shows the outstanding capability of program semantics understanding on many practical software engineering tasks. 
Code and models are released at: \\url{https://github.com/pdlan/OSCAR}.}\n}", "pdf": "http://proceedings.mlr.press/v139/peng21b/peng21b.pdf", "supp": "", "pdf_size": 531895, "gs_citation": 71, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16362826083131548815&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "University of Science and Technology of China; Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia", "aff_domain": "ustc.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", "email": "ustc.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", "github": "https://github.com/pdlan/OSCAR", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/peng21b.html", "aff_unique_index": "0;1;1;1;1;1", "aff_unique_norm": "University of Science and Technology of China;Microsoft", "aff_unique_dep": ";Research", "aff_unique_url": "http://www.ustc.edu.cn;https://www.microsoft.com/en-us/research/group/asia", "aff_unique_abbr": "USTC;MSR Asia", "aff_campus_unique_index": "1;1;1;1;1", "aff_campus_unique": ";Asia", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "China" }, { "title": "How rotational invariance of common kernels prevents generalization in high dimensions", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8801", "id": "8801", "proceeding": "http://proceedings.mlr.press/v139/donhauser21a.html", "slides": "/media/icml-2021/Slides/8801.pdf", "author_site": "Konstantin Donhauser, Mingqi Wu, Fanny Yang", "author": "Konstantin Donhauser; Mingqi Wu; Fanny Yang", "abstract": "Kernel ridge regression is well-known to achieve minimax optimal rates in low-dimensional settings. However, its behavior in high dimensions is much less understood. Recent work establishes consistency for high-dimensional kernel regression for a number of specific assumptions on the data distribution. In this paper, we show that in high dimensions, the rotational invariance property of commonly studied kernels (such as RBF, inner product kernels and fully-connected NTK of any depth) leads to inconsistent estimation unless the ground truth is a low-degree polynomial. Our lower bound on the generalization error holds for a wide range of distributions and kernels with different eigenvalue decays. This lower bound suggests that consistency results for kernel ridge regression in high dimensions generally require a more refined analysis that depends on the structure of the kernel beyond its eigenvalue decay.", "bibtex": "@InProceedings{pmlr-v139-donhauser21a,\n title = \t {How rotational invariance of common kernels prevents generalization in high dimensions},\n author = {Donhauser, Konstantin and Wu, Mingqi and Yang, Fanny},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2804--2814},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/donhauser21a/donhauser21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/donhauser21a.html},\n abstract = \t {Kernel ridge regression is well-known to achieve minimax optimal rates in low-dimensional settings. However, its behavior in high dimensions is much less understood. 
Recent work establishes consistency for high-dimensional kernel regression for a number of specific assumptions on the data distribution. In this paper, we show that in high dimensions, the rotational invariance property of commonly studied kernels (such as RBF, inner product kernels and fully-connected NTK of any depth) leads to inconsistent estimation unless the ground truth is a low-degree polynomial. Our lower bound on the generalization error holds for a wide range of distributions and kernels with different eigenvalue decays. This lower bound suggests that consistency results for kernel ridge regression in high dimensions generally require a more refined analysis that depends on the structure of the kernel beyond its eigenvalue decay.}\n}", "pdf": "http://proceedings.mlr.press/v139/donhauser21a/donhauser21a.pdf", "supp": "", "pdf_size": 476440, "gs_citation": 34, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15941159767452882886&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, ETH Z\u00fcrich; Department of Computer Science, ETH Z\u00fcrich; Department of Computer Science, ETH Z\u00fcrich", "aff_domain": "ethz.ch; ; ", "email": "ethz.ch; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/donhauser21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETHZ", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Switzerland" }, { "title": "How to Learn when Data Reacts to Your Model: Performative Gradient Descent", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9355", "id": "9355", "proceeding": "http://proceedings.mlr.press/v139/izzo21a.html", "slides": "", "author_site": "Zachary Izzo, Lexing Ying, James Zou", "author": "Zachary Izzo; Lexing Ying; James Zou", "abstract": "Performative distribution shift captures the setting where the choice of which ML model is deployed changes the data distribution. For example, a bank which uses the number of open credit lines to determine a customer\u2019s risk of default on a loan may induce customers to open more credit lines in order to improve their chances of being approved. Because of the interactions between the model and data distribution, finding the optimal model parameters is challenging. Works in this area have focused on finding stable points, which can be far from optimal. Here we introduce \\emph{performative gradient descent} (PerfGD), an algorithm for computing performatively optimal points. Under regularity assumptions on the performative loss, PerfGD is the first algorithm which provably converges to an optimal point. PerfGD explicitly captures how changes in the model affects the data distribution and is simple to use. 
We support our findings with theory and experiments.", "bibtex": "@InProceedings{pmlr-v139-izzo21a,\n title = \t {How to Learn when Data Reacts to Your Model: Performative Gradient Descent},\n author = {Izzo, Zachary and Ying, Lexing and Zou, James},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4641--4650},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/izzo21a/izzo21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/izzo21a.html},\n abstract = \t {Performative distribution shift captures the setting where the choice of which ML model is deployed changes the data distribution. For example, a bank which uses the number of open credit lines to determine a customer\u2019s risk of default on a loan may induce customers to open more credit lines in order to improve their chances of being approved. Because of the interactions between the model and data distribution, finding the optimal model parameters is challenging. Works in this area have focused on finding stable points, which can be far from optimal. Here we introduce \\emph{performative gradient descent} (PerfGD), an algorithm for computing performatively optimal points. Under regularity assumptions on the performative loss, PerfGD is the first algorithm which provably converges to an optimal point. PerfGD explicitly captures how changes in the model affects the data distribution and is simple to use. We support our findings with theory and experiments.}\n}", "pdf": "http://proceedings.mlr.press/v139/izzo21a/izzo21a.pdf", "supp": "", "pdf_size": 2283756, "gs_citation": 94, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13901114042454326494&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Mathematics, Stanford University + Institute for Computational and Mathematical Engineering, Stanford University; Department of Mathematics, Stanford University + Institute for Computational and Mathematical Engineering, Stanford University; Department of Biomedical Data Science, Stanford University", "aff_domain": "stanford.edu; ; ", "email": "stanford.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/izzo21a.html", "aff_unique_index": "0+0;0+0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Mathematics", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0+0;0+0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0+0;0+0;0", "aff_country_unique": "United States" }, { "title": "HyperHyperNetwork for the Design of Antenna Arrays", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9671", "id": "9671", "proceeding": "http://proceedings.mlr.press/v139/lutati21a.html", "slides": "", "author_site": "Shahar Lutati, Lior Wolf", "author": "Shahar Lutati; Lior Wolf", "abstract": "We present deep learning methods for the design of arrays and single instances of small antennas. Each design instance is conditioned on a target radiation pattern and is required to conform to specific spatial dimensions and to include, as part of its metallic structure, a set of predetermined locations. 
The solution, in the case of a single antenna, is based on a composite neural network that combines a simulation network, a hypernetwork, and a refinement network. In the design of the antenna array, we add an additional design level and employ a hypernetwork within a hypernetwork. The learning objective is based on measuring the similarity of the obtained radiation pattern to the desired one. Our experiments demonstrate that our approach is able to design novel antennas and antenna arrays that are compliant with the design requirements, considerably better than the baseline methods. We compare the solutions obtained by our method to existing designs and demonstrate a high level of overlap. When designing the antenna array of a cellular phone, the obtained solution displays improved properties over the existing one.", "bibtex": "@InProceedings{pmlr-v139-lutati21a,\n title = \t {HyperHyperNetwork for the Design of Antenna Arrays},\n author = {Lutati, Shahar and Wolf, Lior},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7214--7223},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lutati21a/lutati21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lutati21a.html},\n abstract = \t {We present deep learning methods for the design of arrays and single instances of small antennas. Each design instance is conditioned on a target radiation pattern and is required to conform to specific spatial dimensions and to include, as part of its metallic structure, a set of predetermined locations. The solution, in the case of a single antenna, is based on a composite neural network that combines a simulation network, a hypernetwork, and a refinement network. In the design of the antenna array, we add an additional design level and employ a hypernetwork within a hypernetwork. The learning objective is based on measuring the similarity of the obtained radiation pattern to the desired one. Our experiments demonstrate that our approach is able to design novel antennas and antenna arrays that are compliant with the design requirements, considerably better than the baseline methods. We compare the solutions obtained by our method to existing designs and demonstrate a high level of overlap. 
When designing the antenna array of a cellular phone, the obtained solution displays improved properties over the existing one.}\n}", "pdf": "http://proceedings.mlr.press/v139/lutati21a/lutati21a.pdf", "supp": "", "pdf_size": 1818110, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1432615091676375765&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Tel Aviv University; Tel Aviv University + Facebook AI Research", "aff_domain": "gmail.com;gmail.com", "email": "gmail.com;gmail.com", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/lutati21a.html", "aff_unique_index": "0;0+1", "aff_unique_norm": "Tel Aviv University;Meta", "aff_unique_dep": ";Facebook AI Research", "aff_unique_url": "https://www.tau.ac.il;https://research.facebook.com", "aff_unique_abbr": "TAU;FAIR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0+1", "aff_country_unique": "Israel;United States" }, { "title": "Hyperparameter Selection for Imitation Learning", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10565", "id": "10565", "proceeding": "http://proceedings.mlr.press/v139/hussenot21a.html", "slides": "", "author_site": "L\u00e9onard Hussenot, Marcin Andrychowicz, Damien Vincent, Robert Dadashi, Anton Raichuk, Sabela Ramos, Nikola Momchev, Sertan Girgin, Raphael Marinier, Lukasz Stafiniak, Emmanuel Orsini, Olivier Bachem, Matthieu Geist, Olivier Pietquin", "author": "L\u00e9onard Hussenot; Marcin Andrychowicz; Damien Vincent; Robert Dadashi; Anton Raichuk; Sabela Ramos; Nikola Momchev; Sertan Girgin; Raphael Marinier; Lukasz Stafiniak; Manu Orsini; Olivier Bachem; Matthieu Geist; Olivier Pietquin", "abstract": "We address the issue of tuning hyperparameters (HPs) for imitation learning algorithms in the context of continuous-control, when the underlying reward function of the demonstrating expert cannot be observed at any time. The vast literature in imitation learning mostly considers this reward function to be available for HP selection, but this is not a realistic setting. Indeed, would this reward function be available, it could then directly be used for policy training and imitation would not be necessary. To tackle this mostly ignored problem, we propose a number of possible proxies to the external reward. We evaluate them in an extensive empirical study (more than 10\u2019000 agents across 9 environments) and make practical recommendations for selecting HPs. 
Our results show that while imitation learning algorithms are sensitive to HP choices, it is often possible to select good enough HPs through a proxy to the reward function.", "bibtex": "@InProceedings{pmlr-v139-hussenot21a,\n title = \t {Hyperparameter Selection for Imitation Learning},\n author = {Hussenot, L{\\'e}onard and Andrychowicz, Marcin and Vincent, Damien and Dadashi, Robert and Raichuk, Anton and Ramos, Sabela and Momchev, Nikola and Girgin, Sertan and Marinier, Raphael and Stafiniak, Lukasz and Orsini, Manu and Bachem, Olivier and Geist, Matthieu and Pietquin, Olivier},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4511--4522},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hussenot21a/hussenot21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hussenot21a.html},\n abstract = \t {We address the issue of tuning hyperparameters (HPs) for imitation learning algorithms in the context of continuous-control, when the underlying reward function of the demonstrating expert cannot be observed at any time. The vast literature in imitation learning mostly considers this reward function to be available for HP selection, but this is not a realistic setting. Indeed, would this reward function be available, it could then directly be used for policy training and imitation would not be necessary. To tackle this mostly ignored problem, we propose a number of possible proxies to the external reward. We evaluate them in an extensive empirical study (more than 10\u2019000 agents across 9 environments) and make practical recommendations for selecting HPs. Our results show that while imitation learning algorithms are sensitive to HP choices, it is often possible to select good enough HPs through a proxy to the reward function.}\n}", "pdf": "http://proceedings.mlr.press/v139/hussenot21a/hussenot21a.pdf", "supp": "", "pdf_size": 4126437, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14130455753723701690&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Google Research, Brain Team+Univ. de Lille, CNRS, Inria Scool, UMR 9189 CRIStAL; Google Research, Brain Team; Google Research, Brain Team; Google Research, Brain Team; Google Research, Brain Team; Google Research, Brain Team; Google Research, Brain Team; Google Research, Brain Team; Google Research, Brain Team; Google Research, Brain Team; Google Research, Brain Team; Google Research, Brain Team; Google Research, Brain Team; Google Research, Brain Team", "aff_domain": "google.com; ; ; ; ; ; ; ; ; ; ; ; ; ", "email": "google.com; ; ; ; ; ; ; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 14, "oa": "https://proceedings.mlr.press/v139/hussenot21a.html", "aff_unique_index": "0+1;0;0;0;0;0;0;0;0;0;0;0;0;0", "aff_unique_norm": "Google;University of Lille", "aff_unique_dep": "Google Research;Inria Scool, UMR 9189 CRIStAL", "aff_unique_url": "https://research.google;https://www.univ-lille.fr", "aff_unique_abbr": "Google;Univ. 
de Lille", "aff_campus_unique_index": "0;0;0;0;0;0;0;0;0;0;0;0;0;0", "aff_campus_unique": "Mountain View;", "aff_country_unique_index": "0+1;0;0;0;0;0;0;0;0;0;0;0;0;0", "aff_country_unique": "United States;France" }, { "title": "I-BERT: Integer-only BERT Quantization", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9811", "id": "9811", "proceeding": "http://proceedings.mlr.press/v139/kim21d.html", "slides": "", "author_site": "Sehoon Kim, Amir Gholaminejad, Zhewei Yao, Michael Mahoney, EECS Kurt Keutzer", "author": "Sehoon Kim; Amir Gholami; Zhewei Yao; Michael W. Mahoney; Kurt Keutzer", "abstract": "Transformer based models, like BERT and RoBERTa, have achieved state-of-the-art results in many Natural Language Processing tasks. However, their memory footprint, inference latency, and power consumption are prohibitive efficient inference at the edge, and even at the data center. While quantization can be a viable solution for this, previous work on quantizing Transformer based models use floating-point arithmetic during inference, which cannot efficiently utilize integer-only logical units such as the recent Turing Tensor Cores, or traditional integer-only ARM processors. In this work, we propose I-BERT, a novel quantization scheme for Transformer based models that quantizes the entire inference with integer-only arithmetic. Based on lightweight integer-only approximation methods for nonlinear operations, e.g., GELU, Softmax, and Layer Normalization, I-BERT performs an end-to-end integer-only BERT inference without any floating point calculation. We evaluate our approach on GLUE downstream tasks using RoBERTa-Base/Large. We show that for both cases, I-BERT achieves similar (and slightly higher) accuracy as compared to the full-precision baseline. Furthermore, our preliminary implementation of I-BERT shows a speedup of 2.4- 4.0x for INT8 inference on a T4 GPU system as compared to FP32 inference. The framework has been developed in PyTorch and has been open-sourced.", "bibtex": "@InProceedings{pmlr-v139-kim21d,\n title = \t {I-BERT: Integer-only BERT Quantization},\n author = {Kim, Sehoon and Gholami, Amir and Yao, Zhewei and Mahoney, Michael W. and Keutzer, Kurt},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5506--5518},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kim21d/kim21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/kim21d.html},\n abstract = \t {Transformer based models, like BERT and RoBERTa, have achieved state-of-the-art results in many Natural Language Processing tasks. However, their memory footprint, inference latency, and power consumption are prohibitive efficient inference at the edge, and even at the data center. While quantization can be a viable solution for this, previous work on quantizing Transformer based models use floating-point arithmetic during inference, which cannot efficiently utilize integer-only logical units such as the recent Turing Tensor Cores, or traditional integer-only ARM processors. In this work, we propose I-BERT, a novel quantization scheme for Transformer based models that quantizes the entire inference with integer-only arithmetic. 
Based on lightweight integer-only approximation methods for nonlinear operations, e.g., GELU, Softmax, and Layer Normalization, I-BERT performs an end-to-end integer-only BERT inference without any floating point calculation. We evaluate our approach on GLUE downstream tasks using RoBERTa-Base/Large. We show that for both cases, I-BERT achieves similar (and slightly higher) accuracy as compared to the full-precision baseline. Furthermore, our preliminary implementation of I-BERT shows a speedup of 2.4- 4.0x for INT8 inference on a T4 GPU system as compared to FP32 inference. The framework has been developed in PyTorch and has been open-sourced.}\n}", "pdf": "http://proceedings.mlr.press/v139/kim21d/kim21d.pdf", "supp": "", "pdf_size": 759423, "gs_citation": 429, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5867141575627629852&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "University of California, Berkeley; University of California, Berkeley; University of California, Berkeley; University of California, Berkeley; University of California, Berkeley", "aff_domain": "berkeley.edu;berkeley.edu;berkeley.edu;berkeley.edu;berkeley.edu", "email": "berkeley.edu;berkeley.edu;berkeley.edu;berkeley.edu;berkeley.edu", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/kim21d.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Image-Level or Object-Level? A Tale of Two Resampling Strategies for Long-Tailed Detection", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9533", "id": "9533", "proceeding": "http://proceedings.mlr.press/v139/chang21c.html", "slides": "", "author_site": "Nadine Chang, Zhiding Yu, Yu-Xiong Wang, Anima Anandkumar, Sanja Fidler, Jose Alvarez", "author": "Nadine Chang; Zhiding Yu; Yu-Xiong Wang; Animashree Anandkumar; Sanja Fidler; Jose M Alvarez", "abstract": "Training on datasets with long-tailed distributions has been challenging for major recognition tasks such as classification and detection. To deal with this challenge, image resampling is typically introduced as a simple but effective approach. However, we observe that long-tailed detection differs from classification since multiple classes may be present in one image. As a result, image resampling alone is not enough to yield a sufficiently balanced distribution at the object-level. We address object-level resampling by introducing an object-centric sampling strategy based on a dynamic, episodic memory bank. Our proposed strategy has two benefits: 1) convenient object-level resampling without significant extra computation, and 2) implicit feature-level augmentation from model updates. We show that image-level and object-level resamplings are both important, and thus unify them with a joint resampling strategy. Our method achieves state-of-the-art performance on the rare categories of LVIS, with 1.89% and 3.13% relative improvements over Forest R-CNN on detection and instance segmentation.", "bibtex": "@InProceedings{pmlr-v139-chang21c,\n title = \t {Image-Level or Object-Level? 
A Tale of Two Resampling Strategies for Long-Tailed Detection},\n author = {Chang, Nadine and Yu, Zhiding and Wang, Yu-Xiong and Anandkumar, Animashree and Fidler, Sanja and Alvarez, Jose M},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1463--1472},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chang21c/chang21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/chang21c.html},\n abstract = \t {Training on datasets with long-tailed distributions has been challenging for major recognition tasks such as classification and detection. To deal with this challenge, image resampling is typically introduced as a simple but effective approach. However, we observe that long-tailed detection differs from classification since multiple classes may be present in one image. As a result, image resampling alone is not enough to yield a sufficiently balanced distribution at the object-level. We address object-level resampling by introducing an object-centric sampling strategy based on a dynamic, episodic memory bank. Our proposed strategy has two benefits: 1) convenient object-level resampling without significant extra computation, and 2) implicit feature-level augmentation from model updates. We show that image-level and object-level resamplings are both important, and thus unify them with a joint resampling strategy. Our method achieves state-of-the-art performance on the rare categories of LVIS, with 1.89% and 3.13% relative improvements over Forest R-CNN on detection and instance segmentation.}\n}", "pdf": "http://proceedings.mlr.press/v139/chang21c/chang21c.pdf", "supp": "", "pdf_size": 3795549, "gs_citation": 49, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=121160204477537085&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Carnegie Mellon Univ; NVIDIA; Univ of Illinois at Urbana-Champaign; Caltech; Univ of Toronto; Vector Institute", "aff_domain": "cmu.edu;nvidia.com; ; ; ; ", "email": "cmu.edu;nvidia.com; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/chang21c.html", "aff_unique_index": "0;1;2;3;4;5", "aff_unique_norm": "Carnegie Mellon University;NVIDIA;University of Illinois Urbana-Champaign;California Institute of Technology;University of Toronto;Vector Institute", "aff_unique_dep": ";NVIDIA Corporation;;;;", "aff_unique_url": "https://www.cmu.edu;https://www.nvidia.com;https://illinois.edu;https://www.caltech.edu;https://www.utoronto.ca;https://vectorinstitute.ai/", "aff_unique_abbr": "CMU;NVIDIA;UIUC;Caltech;U of T;Vector Institute", "aff_campus_unique_index": "1;2", "aff_campus_unique": ";Urbana-Champaign;Pasadena", "aff_country_unique_index": "0;0;0;0;1;1", "aff_country_unique": "United States;Canada" }, { "title": "Imitation by Predicting Observations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10285", "id": "10285", "proceeding": "http://proceedings.mlr.press/v139/jaegle21b.html", "slides": "", "author_site": "Andrew Jaegle, Yury Sulsky, Arun Ahuja, Jake Bruce, Rob Fergus, Greg Wayne", "author": "Andrew Jaegle; Yury Sulsky; Arun Ahuja; Jake Bruce; Rob Fergus; Greg Wayne", "abstract": "Imitation learning enables agents to reuse and adapt the hard-won expertise of others, offering a solution to several key challenges in 
learning behavior. Although it is easy to observe behavior in the real-world, the underlying actions may not be accessible. We present a new method for imitation solely from observations that achieves comparable performance to experts on challenging continuous control tasks while also exhibiting robustness in the presence of observations unrelated to the task. Our method, which we call FORM (for \"Future Observation Reward Model\") is derived from an inverse RL objective and imitates using a model of expert behavior learned by generative modelling of the expert\u2019s observations, without needing ground truth actions. We show that FORM performs comparably to a strong baseline IRL method (GAIL) on the DeepMind Control Suite benchmark, while outperforming GAIL in the presence of task-irrelevant features.", "bibtex": "@InProceedings{pmlr-v139-jaegle21b,\n title = \t {Imitation by Predicting Observations},\n author = {Jaegle, Andrew and Sulsky, Yury and Ahuja, Arun and Bruce, Jake and Fergus, Rob and Wayne, Greg},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4665--4676},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jaegle21b/jaegle21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/jaegle21b.html},\n abstract = \t {Imitation learning enables agents to reuse and adapt the hard-won expertise of others, offering a solution to several key challenges in learning behavior. Although it is easy to observe behavior in the real-world, the underlying actions may not be accessible. We present a new method for imitation solely from observations that achieves comparable performance to experts on challenging continuous control tasks while also exhibiting robustness in the presence of observations unrelated to the task. Our method, which we call FORM (for \"Future Observation Reward Model\") is derived from an inverse RL objective and imitates using a model of expert behavior learned by generative modelling of the expert\u2019s observations, without needing ground truth actions. 
We show that FORM performs comparably to a strong baseline IRL method (GAIL) on the DeepMind Control Suite benchmark, while outperforming GAIL in the presence of task-irrelevant features.}\n}", "pdf": "http://proceedings.mlr.press/v139/jaegle21b/jaegle21b.pdf", "supp": "", "pdf_size": 1288134, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17441607708359978984&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind", "aff_domain": "deepmind.com; ; ; ; ; ", "email": "deepmind.com; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/jaegle21b.html", "aff_unique_index": "0;0;0;0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Implicit Bias of Linear RNNs", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10445", "id": "10445", "proceeding": "http://proceedings.mlr.press/v139/emami21b.html", "slides": "/media/icml-2021/Slides/10445.pdf", "author_site": "Melikasadat Emami, Mojtaba Sahraee-Ardakan, Parthe Pandit, Sundeep Rangan, Alyson Fletcher", "author": "Melikasadat Emami; Mojtaba Sahraee-Ardakan; Parthe Pandit; Sundeep Rangan; Alyson K Fletcher", "abstract": "Contemporary wisdom based on empirical studies suggests that standard recurrent neural networks (RNNs) do not perform well on tasks requiring long-term memory. However, RNNs\u2019 poor ability to capture long-term dependencies has not been fully understood. This paper provides a rigorous explanation of this property in the special case of linear RNNs. Although this work is limited to linear RNNs, even these systems have traditionally been difficult to analyze due to their non-linear parameterization. Using recently-developed kernel regime analysis, our main result shows that as the number of hidden units goes to infinity, linear RNNs learned from random initializations are functionally equivalent to a certain weighted 1D-convolutional network. Importantly, the weightings in the equivalent model cause an implicit bias to elements with smaller time lags in the convolution, and hence shorter memory. The degree of this bias depends on the variance of the transition matrix at initialization and is related to the classic exploding and vanishing gradients problem. The theory is validated with both synthetic and real data experiments.", "bibtex": "@InProceedings{pmlr-v139-emami21b,\n title = \t {Implicit Bias of Linear RNNs},\n author = {Emami, Melikasadat and Sahraee-Ardakan, Mojtaba and Pandit, Parthe and Rangan, Sundeep and Fletcher, Alyson K},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2982--2992},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/emami21b/emami21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/emami21b.html},\n abstract = \t {Contemporary wisdom based on empirical studies suggests that standard recurrent neural networks (RNNs) do not perform well on tasks requiring long-term memory. 
However, RNNs\u2019 poor ability to capture long-term dependencies has not been fully understood. This paper provides a rigorous explanation of this property in the special case of linear RNNs. Although this work is limited to linear RNNs, even these systems have traditionally been difficult to analyze due to their non-linear parameterization. Using recently-developed kernel regime analysis, our main result shows that as the number of hidden units goes to infinity, linear RNNs learned from random initializations are functionally equivalent to a certain weighted 1D-convolutional network. Importantly, the weightings in the equivalent model cause an implicit bias to elements with smaller time lags in the convolution, and hence shorter memory. The degree of this bias depends on the variance of the transition matrix at initialization and is related to the classic exploding and vanishing gradients problem. The theory is validated with both synthetic and real data experiments.}\n}", "pdf": "http://proceedings.mlr.press/v139/emami21b/emami21b.pdf", "supp": "", "pdf_size": 1957887, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4847067604824249711&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Electrical and Computer Engineering, University of California, Los Angeles, Los Angeles, USA+Department of Statistics, University of California, Los Angeles, Los Angeles, USA; Department of Electrical and Computer Engineering, University of California, Los Angeles, Los Angeles, USA+Department of Statistics, University of California, Los Angeles, Los Angeles, USA; Department of Electrical and Computer Engineering, University of California, Los Angeles, Los Angeles, USA+Department of Statistics, University of California, Los Angeles, Los Angeles, USA; Department of Electrical and Computer Engineering, New York University, Brooklyn, New York, USA; Department of Electrical and Computer Engineering, University of California, Los Angeles, Los Angeles, USA+Department of Statistics, University of California, Los Angeles, Los Angeles, USA", "aff_domain": "ucla.edu; ; ; ; ", "email": "ucla.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/emami21b.html", "aff_unique_index": "0+0;0+0;0+0;1;0+0", "aff_unique_norm": "University of California, Los Angeles;New York University", "aff_unique_dep": "Department of Electrical and Computer Engineering;Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.ucla.edu;https://www.nyu.edu", "aff_unique_abbr": "UCLA;NYU", "aff_campus_unique_index": "0+0;0+0;0+0;1;0+0", "aff_campus_unique": "Los Angeles;Brooklyn", "aff_country_unique_index": "0+0;0+0;0+0;0;0+0", "aff_country_unique": "United States" }, { "title": "Implicit Regularization in Tensor Factorization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8767", "id": "8767", "proceeding": "http://proceedings.mlr.press/v139/razin21a.html", "slides": "/media/icml-2021/Slides/8767.pdf", "author_site": "Noam Razin, Asaf Maman, Nadav Cohen", "author": "Noam Razin; Asaf Maman; Nadav Cohen", "abstract": "Recent efforts to unravel the mystery of implicit regularization in deep learning have led to a theoretical focus on matrix factorization \u2014 matrix completion via linear neural network. 
As a step further towards practical deep learning, we provide the first theoretical analysis of implicit regularization in tensor factorization \u2014 tensor completion via certain type of non-linear neural network. We circumvent the notorious difficulty of tensor problems by adopting a dynamical systems perspective, and characterizing the evolution induced by gradient descent. The characterization suggests a form of greedy low tensor rank search, which we rigorously prove under certain conditions, and empirically demonstrate under others. Motivated by tensor rank capturing the implicit regularization of a non-linear neural network, we empirically explore it as a measure of complexity, and find that it captures the essence of datasets on which neural networks generalize. This leads us to believe that tensor rank may pave way to explaining both implicit regularization in deep learning, and the properties of real-world data translating this implicit regularization to generalization.", "bibtex": "@InProceedings{pmlr-v139-razin21a,\n title = \t {Implicit Regularization in Tensor Factorization},\n author = {Razin, Noam and Maman, Asaf and Cohen, Nadav},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8913--8924},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/razin21a/razin21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/razin21a.html},\n abstract = \t {Recent efforts to unravel the mystery of implicit regularization in deep learning have led to a theoretical focus on matrix factorization \u2014 matrix completion via linear neural network. As a step further towards practical deep learning, we provide the first theoretical analysis of implicit regularization in tensor factorization \u2014 tensor completion via certain type of non-linear neural network. We circumvent the notorious difficulty of tensor problems by adopting a dynamical systems perspective, and characterizing the evolution induced by gradient descent. The characterization suggests a form of greedy low tensor rank search, which we rigorously prove under certain conditions, and empirically demonstrate under others. Motivated by tensor rank capturing the implicit regularization of a non-linear neural network, we empirically explore it as a measure of complexity, and find that it captures the essence of datasets on which neural networks generalize. 
This leads us to believe that tensor rank may pave way to explaining both implicit regularization in deep learning, and the properties of real-world data translating this implicit regularization to generalization.}\n}", "pdf": "http://proceedings.mlr.press/v139/razin21a/razin21a.pdf", "supp": "", "pdf_size": 1003197, "gs_citation": 56, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4594323532805369080&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Blavatnik School of Computer Science, Tel Aviv University, Israel; Blavatnik School of Computer Science, Tel Aviv University, Israel; Blavatnik School of Computer Science, Tel Aviv University, Israel", "aff_domain": "cs.tau.ac.il;mail.tau.ac.il; ", "email": "cs.tau.ac.il;mail.tau.ac.il; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/razin21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Tel Aviv University", "aff_unique_dep": "Blavatnik School of Computer Science", "aff_unique_url": "https://www.tau.ac.il", "aff_unique_abbr": "TAU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Tel Aviv", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Israel" }, { "title": "Implicit rate-constrained optimization of non-decomposable objectives", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9587", "id": "9587", "proceeding": "http://proceedings.mlr.press/v139/kumar21b.html", "slides": "/media/icml-2021/Slides/9587.pdf", "author_site": "Abhishek Kumar, Harikrishna Narasimhan, Andrew Cotter", "author": "Abhishek Kumar; Harikrishna Narasimhan; Andrew Cotter", "abstract": "We consider a popular family of constrained optimization problems arising in machine learning that involve optimizing a non-decomposable evaluation metric with a certain thresholded form, while constraining another metric of interest. Examples of such problems include optimizing false negative rate at a fixed false positive rate, optimizing precision at a fixed recall, optimizing the area under the precision-recall or ROC curves, etc. Our key idea is to formulate a rate-constrained optimization that expresses the threshold parameter as a function of the model parameters via the Implicit Function theorem. We show how the resulting optimization problem can be solved using standard gradient based methods. Experiments on benchmark datasets demonstrate the effectiveness of our proposed method over existing state-of-the-art approaches for these problems.", "bibtex": "@InProceedings{pmlr-v139-kumar21b,\n title = \t {Implicit rate-constrained optimization of non-decomposable objectives},\n author = {Kumar, Abhishek and Narasimhan, Harikrishna and Cotter, Andrew},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5861--5871},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kumar21b/kumar21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/kumar21b.html},\n abstract = \t {We consider a popular family of constrained optimization problems arising in machine learning that involve optimizing a non-decomposable evaluation metric with a certain thresholded form, while constraining another metric of interest. 
Examples of such problems include optimizing false negative rate at a fixed false positive rate, optimizing precision at a fixed recall, optimizing the area under the precision-recall or ROC curves, etc. Our key idea is to formulate a rate-constrained optimization that expresses the threshold parameter as a function of the model parameters via the Implicit Function theorem. We show how the resulting optimization problem can be solved using standard gradient based methods. Experiments on benchmark datasets demonstrate the effectiveness of our proposed method over existing state-of-the-art approaches for these problems.}\n}", "pdf": "http://proceedings.mlr.press/v139/kumar21b/kumar21b.pdf", "supp": "", "pdf_size": 513059, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17472849056424055687&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Google Research; Google Research; Google Research", "aff_domain": "google.com; ; ", "email": "google.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/kumar21b.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google Research", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Implicit-PDF: Non-Parametric Representation of Probability Distributions on the Rotation Manifold", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10375", "id": "10375", "proceeding": "http://proceedings.mlr.press/v139/murphy21a.html", "slides": "", "author_site": "Kieran Murphy, Carlos Esteves, Varun Jampani, Srikumar Ramalingam, Ameesh Makadia", "author": "Kieran A Murphy; Carlos Esteves; Varun Jampani; Srikumar Ramalingam; Ameesh Makadia", "abstract": "In the deep learning era, the vast majority of methods to predict pose from a single image are trained to classify or regress to a single given ground truth pose per image. Such methods have two main shortcomings, i) they cannot represent uncertainty about the predictions, and ii) they cannot handle symmetric objects, where multiple (potentially infinite) poses may be correct. Only recently these shortcomings have been addressed, but current approaches as limited in that they cannot express the full rich space of distributions on the rotation manifold. To this end, we introduce a method to estimate arbitrary, non-parametric distributions on SO(3). Our key idea is to represent the distributions implicitly, with a neural network that estimates the probability density, given the input image and a candidate pose. At inference time, grid sampling or gradient ascent can be used to find the most likely pose, but it is also possible to evaluate the density at any pose, enabling reasoning about symmetries and uncertainty. This is the most general way of representing distributions on manifolds, and to demonstrate its expressive power we introduce a new dataset containing symmetric and nearly-symmetric objects. Our method also shows advantages on the popular object pose estimation benchmarks ModelNet10-SO(3) and T-LESS. 
Code, data, and visualizations may be found at implicit-pdf.github.io.", "bibtex": "@InProceedings{pmlr-v139-murphy21a,\n title = \t {Implicit-PDF: Non-Parametric Representation of Probability Distributions on the Rotation Manifold},\n author = {Murphy, Kieran A and Esteves, Carlos and Jampani, Varun and Ramalingam, Srikumar and Makadia, Ameesh},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7882--7893},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/murphy21a/murphy21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/murphy21a.html},\n abstract = \t {In the deep learning era, the vast majority of methods to predict pose from a single image are trained to classify or regress to a single given ground truth pose per image. Such methods have two main shortcomings, i) they cannot represent uncertainty about the predictions, and ii) they cannot handle symmetric objects, where multiple (potentially infinite) poses may be correct. Only recently these shortcomings have been addressed, but current approaches as limited in that they cannot express the full rich space of distributions on the rotation manifold. To this end, we introduce a method to estimate arbitrary, non-parametric distributions on SO(3). Our key idea is to represent the distributions implicitly, with a neural network that estimates the probability density, given the input image and a candidate pose. At inference time, grid sampling or gradient ascent can be used to find the most likely pose, but it is also possible to evaluate the density at any pose, enabling reasoning about symmetries and uncertainty. This is the most general way of representing distributions on manifolds, and to demonstrate its expressive power we introduce a new dataset containing symmetric and nearly-symmetric objects. Our method also shows advantages on the popular object pose estimation benchmarks ModelNet10-SO(3) and T-LESS. 
Code, data, and visualizations may be found at implicit-pdf.github.io.}\n}", "pdf": "http://proceedings.mlr.press/v139/murphy21a/murphy21a.pdf", "supp": "", "pdf_size": 10150402, "gs_citation": 90, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11923601301560490381&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Google Research, New York, NY, USA; Google Research, New York, NY, USA; Google Research, New York, NY, USA; Google Research, New York, NY, USA; Google Research, New York, NY, USA", "aff_domain": "gmail.com; ; ; ; ", "email": "gmail.com; ; ; ; ", "github": "", "project": "implicit-pdf.github.io", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/murphy21a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google Research", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "New York", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Improved Algorithms for Agnostic Pool-based Active Classification", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9979", "id": "9979", "proceeding": "http://proceedings.mlr.press/v139/katz-samuels21a.html", "slides": "", "author_site": "Julian Katz-Samuels, Jifan Zhang, Lalit Jain, Kevin Jamieson", "author": "Julian Katz-Samuels; Jifan Zhang; Lalit Jain; Kevin Jamieson", "abstract": "We consider active learning for binary classification in the agnostic pool-based setting. The vast majority of works in active learning in the agnostic setting are inspired by the CAL algorithm where each query is uniformly sampled from the disagreement region of the current version space. The sample complexity of such algorithms is described by a quantity known as the disagreement coefficient which captures both the geometry of the hypothesis space as well as the underlying probability space. To date, the disagreement coefficient has been justified by minimax lower bounds only, leaving the door open for superior instance dependent sample complexities. In this work we propose an algorithm that, in contrast to uniform sampling over the disagreement region, solves an experimental design problem to determine a distribution over examples from which to request labels. We show that the new approach achieves sample complexity bounds that are never worse than the best disagreement coefficient-based bounds, but in specific cases can be dramatically smaller. From a practical perspective, the proposed algorithm requires no hyperparameters to tune (e.g., to control the aggressiveness of sampling), and is computationally efficient by means of assuming access to an empirical risk minimization oracle (without any constraints). 
Empirically, we demonstrate that our algorithm is superior to state of the art agnostic active learning algorithms on image classification datasets.", "bibtex": "@InProceedings{pmlr-v139-katz-samuels21a,\n title = \t {Improved Algorithms for Agnostic Pool-based Active Classification},\n author = {Katz-Samuels, Julian and Zhang, Jifan and Jain, Lalit and Jamieson, Kevin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5334--5344},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/katz-samuels21a/katz-samuels21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/katz-samuels21a.html},\n abstract = \t {We consider active learning for binary classification in the agnostic pool-based setting. The vast majority of works in active learning in the agnostic setting are inspired by the CAL algorithm where each query is uniformly sampled from the disagreement region of the current version space. The sample complexity of such algorithms is described by a quantity known as the disagreement coefficient which captures both the geometry of the hypothesis space as well as the underlying probability space. To date, the disagreement coefficient has been justified by minimax lower bounds only, leaving the door open for superior instance dependent sample complexities. In this work we propose an algorithm that, in contrast to uniform sampling over the disagreement region, solves an experimental design problem to determine a distribution over examples from which to request labels. We show that the new approach achieves sample complexity bounds that are never worse than the best disagreement coefficient-based bounds, but in specific cases can be dramatically smaller. From a practical perspective, the proposed algorithm requires no hyperparameters to tune (e.g., to control the aggressiveness of sampling), and is computationally efficient by means of assuming access to an empirical risk minimization oracle (without any constraints). Empirically, we demonstrate that our algorithm is superior to state of the art agnostic active learning algorithms on image classification datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/katz-samuels21a/katz-samuels21a.pdf", "supp": "", "pdf_size": 1959658, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13050635397333646987&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "University of Wisconsin, Madison, WI; Paul G. Allen School of Computer Science and Engineering, University of Washington, Seattle, WA; Paul G. Allen School of Computer Science and Engineering, University of Washington, Seattle, WA; Paul G. Allen School of Computer Science and Engineering, University of Washington, Seattle, WA", "aff_domain": "cs.washington.edu; ; ;cs.washington.edu", "email": "cs.washington.edu; ; ;cs.washington.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/katz-samuels21a.html", "aff_unique_index": "0;1;1;1", "aff_unique_norm": "University of Wisconsin-Madison;University of Washington", "aff_unique_dep": ";Paul G. 
Allen School of Computer Science and Engineering", "aff_unique_url": "https://www.wisc.edu;https://www.washington.edu", "aff_unique_abbr": "UW-Madison;UW", "aff_campus_unique_index": "0;1;1;1", "aff_campus_unique": "Madison;Seattle", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Improved Confidence Bounds for the Linear Logistic Model and Applications to Bandits", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8411", "id": "8411", "proceeding": "http://proceedings.mlr.press/v139/jun21a.html", "slides": "/media/icml-2021/Slides/8411.pdf", "author_site": "Kwang-Sung Jun, Lalit Jain, Blake Mason, Houssam Nassif", "author": "Kwang-Sung Jun; Lalit Jain; Blake Mason; Houssam Nassif", "abstract": "We propose improved fixed-design confidence bounds for the linear logistic model. Our bounds significantly improve upon the state-of-the-art bound by Li et al. (2017) via recent developments of the self-concordant analysis of the logistic loss (Faury et al., 2020). Specifically, our confidence bound avoids a direct dependence on $1/\\kappa$, where $\\kappa$ is the minimal variance over all arms\u2019 reward distributions. In general, $1/\\kappa$ scales exponentially with the norm of the unknown linear parameter $\\theta^*$. Instead of relying on this worst case quantity, our confidence bound for the reward of any given arm depends directly on the variance of that arm\u2019s reward distribution. We present two applications of our novel bounds to pure exploration and regret minimization logistic bandits improving upon state-of-the-art performance guarantees. For pure exploration we also provide a lower bound highlighting a dependence on $1/\\kappa$ for a family of instances.", "bibtex": "@InProceedings{pmlr-v139-jun21a,\n title = \t {Improved Confidence Bounds for the Linear Logistic Model and Applications to Bandits},\n author = {Jun, Kwang-Sung and Jain, Lalit and Mason, Blake and Nassif, Houssam},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5148--5157},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jun21a/jun21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jun21a.html},\n abstract = \t {We propose improved fixed-design confidence bounds for the linear logistic model. Our bounds significantly improve upon the state-of-the-art bound by Li et al. (2017) via recent developments of the self-concordant analysis of the logistic loss (Faury et al., 2020). Specifically, our confidence bound avoids a direct dependence on $1/\\kappa$, where $\\kappa$ is the minimal variance over all arms\u2019 reward distributions. In general, $1/\\kappa$ scales exponentially with the norm of the unknown linear parameter $\\theta^*$. Instead of relying on this worst case quantity, our confidence bound for the reward of any given arm depends directly on the variance of that arm\u2019s reward distribution. We present two applications of our novel bounds to pure exploration and regret minimization logistic bandits improving upon state-of-the-art performance guarantees. 
For pure exploration we also provide a lower bound highlighting a dependence on $1/\\kappa$ for a family of instances.}\n}", "pdf": "http://proceedings.mlr.press/v139/jun21a/jun21a.pdf", "supp": "", "pdf_size": 471827, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14922885816232254053&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "University of Arizona; University of Washington; University of Wisconsin; Amazon Inc.", "aff_domain": "cs.arizona.edu; ; ; ", "email": "cs.arizona.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/jun21a.html", "aff_unique_index": "0;1;2;3", "aff_unique_norm": "University of Arizona;University of Washington;University of Wisconsin;Amazon", "aff_unique_dep": ";;;Amazon", "aff_unique_url": "https://www.arizona.edu;https://www.washington.edu;https://www.wisc.edu;https://www.amazon.com", "aff_unique_abbr": "UA;UW;UW;Amazon", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Improved Contrastive Divergence Training of Energy-Based Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10055", "id": "10055", "proceeding": "http://proceedings.mlr.press/v139/du21b.html", "slides": "", "author_site": "Yilun Du, Shuang Li, Josh Tenenbaum, Igor Mordatch", "author": "Yilun Du; Shuang Li; Joshua Tenenbaum; Igor Mordatch", "abstract": "Contrastive divergence is a popular method of training energy-based models, but is known to have difficulties with training stability. We propose an adaptation to improve contrastive divergence training by scrutinizing a gradient term that is difficult to calculate and is often left out for convenience. We show that this gradient term is numerically significant and in practice is important to avoid training instabilities, while being tractable to estimate. We further highlight how data augmentation and multi-scale processing can be used to improve model robustness and generation quality. Finally, we empirically evaluate stability of model architectures and show improved performance on a host of benchmarks and use cases, such as image generation, OOD detection, and compositional generation.", "bibtex": "@InProceedings{pmlr-v139-du21b,\n title = \t {Improved Contrastive Divergence Training of Energy-Based Models},\n author = {Du, Yilun and Li, Shuang and Tenenbaum, Joshua and Mordatch, Igor},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2837--2848},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/du21b/du21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/du21b.html},\n abstract = \t {Contrastive divergence is a popular method of training energy-based models, but is known to have difficulties with training stability. We propose an adaptation to improve contrastive divergence training by scrutinizing a gradient term that is difficult to calculate and is often left out for convenience. We show that this gradient term is numerically significant and in practice is important to avoid training instabilities, while being tractable to estimate. 
We further highlight how data augmentation and multi-scale processing can be used to improve model robustness and generation quality. Finally, we empirically evaluate stability of model architectures and show improved performance on a host of benchmarks and use cases, such as image generation, OOD detection, and compositional generation.}\n}", "pdf": "http://proceedings.mlr.press/v139/du21b/du21b.pdf", "supp": "", "pdf_size": 8472237, "gs_citation": 169, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11512717890022848586&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "MIT CSAIL; MIT CSAIL; MIT CSAIL; Google Brain", "aff_domain": "mit.edu; ; ; ", "email": "mit.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/du21b.html", "aff_unique_index": "0;0;0;1", "aff_unique_norm": "Massachusetts Institute of Technology;Google", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory;Google Brain", "aff_unique_url": "https://www.csail.mit.edu;https://brain.google.com", "aff_unique_abbr": "MIT CSAIL;Google Brain", "aff_campus_unique_index": "0;0;0;1", "aff_campus_unique": "Cambridge;Mountain View", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Improved Corruption Robust Algorithms for Episodic Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8709", "id": "8709", "proceeding": "http://proceedings.mlr.press/v139/chen21d.html", "slides": "/media/icml-2021/Slides/8709.pdf", "author_site": "Yifang Chen, Simon Du, Kevin Jamieson", "author": "Yifang Chen; Simon Du; Kevin Jamieson", "abstract": "We study episodic reinforcement learning under unknown adversarial corruptions in both the rewards and the transition probabilities of the underlying system. We propose new algorithms which, compared to the existing results in \\cite{lykouris2020corruption}, achieve strictly better regret bounds in terms of total corruptions for the tabular setting. To be specific, firstly, our regret bounds depend on more precise numerical values of total rewards corruptions and transition corruptions, instead of only on the total number of corrupted episodes. Secondly, our regret bounds are the first of their kind in the reinforcement learning setting to have the number of corruptions show up additively with respect to $\\min\\{ \\sqrt{T},\\text{PolicyGapComplexity} \\}$ rather than multiplicatively. Our results follow from a general algorithmic framework that combines corruption-robust policy elimination meta-algorithms, and plug-in reward-free exploration sub-algorithms. 
Replacing the meta-algorithm or sub-algorithm may extend the framework to address other corrupted settings with potentially more structure.", "bibtex": "@InProceedings{pmlr-v139-chen21d,\n title = \t {Improved Corruption Robust Algorithms for Episodic Reinforcement Learning},\n author = {Chen, Yifang and Du, Simon and Jamieson, Kevin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1561--1570},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21d/chen21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21d.html},\n abstract = \t {We study episodic reinforcement learning under unknown adversarial corruptions in both the rewards and the transition probabilities of the underlying system. We propose new algorithms which, compared to the existing results in \\cite{lykouris2020corruption}, achieve strictly better regret bounds in terms of total corruptions for the tabular setting. To be specific, firstly, our regret bounds depend on more precise numerical values of total rewards corruptions and transition corruptions, instead of only on the total number of corrupted episodes. Secondly, our regret bounds are the first of their kind in the reinforcement learning setting to have the number of corruptions show up additively with respect to $\\min\\{ \\sqrt{T},\\text{PolicyGapComplexity} \\}$ rather than multiplicatively. Our results follow from a general algorithmic framework that combines corruption-robust policy elimination meta-algorithms, and plug-in reward-free exploration sub-algorithms. Replacing the meta-algorithm or sub-algorithm may extend the framework to address other corrupted settings with potentially more structure.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21d/chen21d.pdf", "supp": "", "pdf_size": 294926, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12699847847811146485&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Paul G. Allen School of Computer Science & Engineering, University of Washington; Paul G. Allen School of Computer Science & Engineering, University of Washington; Paul G. Allen School of Computer Science & Engineering, University of Washington", "aff_domain": "cs.washington.edu;cs.washington.edu;cs.washington.edu", "email": "cs.washington.edu;cs.washington.edu;cs.washington.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/chen21d.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Washington", "aff_unique_dep": "Paul G. Allen School of Computer Science & Engineering", "aff_unique_url": "https://www.washington.edu", "aff_unique_abbr": "UW", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Seattle", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Improved Denoising Diffusion Probabilistic Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9531", "id": "9531", "proceeding": "http://proceedings.mlr.press/v139/nichol21a.html", "slides": "", "author_site": "Alexander Nichol, Prafulla Dhariwal", "author": "Alexander Quinn Nichol; Prafulla Dhariwal", "abstract": "Denoising diffusion probabilistic models (DDPM) are a class of generative models which have recently been shown to produce excellent samples. 
We show that with a few simple modifications, DDPMs can also achieve competitive log-likelihoods while maintaining high sample quality. Additionally, we find that learning variances of the reverse diffusion process allows sampling with an order of magnitude fewer forward passes with a negligible difference in sample quality, which is important for the practical deployment of these models. We additionally use precision and recall to compare how well DDPMs and GANs cover the target distribution. Finally, we show that the sample quality and likelihood of these models scale smoothly with model capacity and training compute, making them easily scalable. We release our code and pre-trained models at https://github.com/openai/improved-diffusion.", "bibtex": "@InProceedings{pmlr-v139-nichol21a,\n title = \t {Improved Denoising Diffusion Probabilistic Models},\n author = {Nichol, Alexander Quinn and Dhariwal, Prafulla},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8162--8171},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nichol21a/nichol21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/nichol21a.html},\n abstract = \t {Denoising diffusion probabilistic models (DDPM) are a class of generative models which have recently been shown to produce excellent samples. We show that with a few simple modifications, DDPMs can also achieve competitive log-likelihoods while maintaining high sample quality. Additionally, we find that learning variances of the reverse diffusion process allows sampling with an order of magnitude fewer forward passes with a negligible difference in sample quality, which is important for the practical deployment of these models. We additionally use precision and recall to compare how well DDPMs and GANs cover the target distribution. Finally, we show that the sample quality and likelihood of these models scale smoothly with model capacity and training compute, making them easily scalable. 
We release our code and pre-trained models at https://github.com/openai/improved-diffusion.}\n}", "pdf": "http://proceedings.mlr.press/v139/nichol21a/nichol21a.pdf", "supp": "", "pdf_size": 1167544, "gs_citation": 4295, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2227179395488568184&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "OpenAI, San Francisco, USA; OpenAI, San Francisco, USA", "aff_domain": "openai.com;openai.com", "email": "openai.com;openai.com", "github": "https://github.com/openai/improved-diffusion", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/nichol21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "OpenAI", "aff_unique_dep": "", "aff_unique_url": "https://openai.com", "aff_unique_abbr": "OpenAI", "aff_campus_unique_index": "0;0", "aff_campus_unique": "San Francisco", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Improved OOD Generalization via Adversarial Training and Pretraing", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10511", "id": "10511", "proceeding": "http://proceedings.mlr.press/v139/yi21a.html", "slides": "", "author_site": "Mingyang Yi, Lu Hou, Jiacheng Sun, Lifeng Shang, Xin Jiang, Qun Liu, Zhiming Ma", "author": "Mingyang Yi; Lu Hou; Jiacheng Sun; Lifeng Shang; Xin Jiang; Qun Liu; Zhiming Ma", "abstract": "Recently, learning a model that generalizes well on out-of-distribution (OOD) data has attracted great attention in the machine learning community. In this paper, after defining OOD generalization by Wasserstein distance, we theoretically justify that a model robust to input perturbation also generalizes well on OOD data. Inspired by previous findings that adversarial training helps improve robustness, we show that models trained by adversarial training have converged excess risk on OOD data. Besides, in the paradigm of pre-training then fine-tuning, we theoretically justify that the input perturbation robust model in the pre-training stage provides an initialization that generalizes well on downstream OOD data. Finally, various experiments conducted on image classification and natural language understanding tasks verify our theoretical findings.", "bibtex": "@InProceedings{pmlr-v139-yi21a,\n title = \t {Improved OOD Generalization via Adversarial Training and Pretraing},\n author = {Yi, Mingyang and Hou, Lu and Sun, Jiacheng and Shang, Lifeng and Jiang, Xin and Liu, Qun and Ma, Zhiming},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11987--11997},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yi21a/yi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yi21a.html},\n abstract = \t {Recently, learning a model that generalizes well on out-of-distribution (OOD) data has attracted great attention in the machine learning community. In this paper, after defining OOD generalization by Wasserstein distance, we theoretically justify that a model robust to input perturbation also generalizes well on OOD data. Inspired by previous findings that adversarial training helps improve robustness, we show that models trained by adversarial training have converged excess risk on OOD data. 
Besides, in the paradigm of pre-training then fine-tuning, we theoretically justify that the input perturbation robust model in the pre-training stage provides an initialization that generalizes well on downstream OOD data. Finally, various experiments conducted on image classification and natural language understanding tasks verify our theoretical findings.}\n}", "pdf": "http://proceedings.mlr.press/v139/yi21a/yi21a.pdf", "supp": "", "pdf_size": 608514, "gs_citation": 86, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=834559146486210931&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": ";;;;;;", "aff_domain": ";;;;;;", "email": ";;;;;;", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/yi21a.html" }, { "title": "Improved Regret Bound and Experience Replay in Regularized Policy Iteration", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9177", "id": "9177", "proceeding": "http://proceedings.mlr.press/v139/lazic21a.html", "slides": "", "author_site": "Nevena Lazic, Dong Yin, Yasin Abbasi-Yadkori, Csaba Szepesvari", "author": "Nevena Lazic; Dong Yin; Yasin Abbasi-Yadkori; Csaba Szepesvari", "abstract": "In this work, we study algorithms for learning in infinite-horizon undiscounted Markov decision processes (MDPs) with function approximation. We first show that the regret analysis of the Politex algorithm (a version of regularized policy iteration) can be sharpened from $O(T^{3/4})$ to $O(\\sqrt{T})$ under nearly identical assumptions, and instantiate the bound with linear function approximation. Our result provides the first high-probability $O(\\sqrt{T})$ regret bound for a computationally efficient algorithm in this setting. The exact implementation of Politex with neural network function approximation is inefficient in terms of memory and computation. Since our analysis suggests that we need to approximate the average of the action-value functions of past policies well, we propose a simple efficient implementation where we train a single Q-function on a replay buffer with past data. We show that this often leads to superior performance over other implementation choices, especially in terms of wall-clock time. Our work also provides a novel theoretical justification for using experience replay within policy iteration algorithms.", "bibtex": "@InProceedings{pmlr-v139-lazic21a,\n title = \t {Improved Regret Bound and Experience Replay in Regularized Policy Iteration},\n author = {Lazic, Nevena and Yin, Dong and Abbasi-Yadkori, Yasin and Szepesvari, Csaba},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6032--6042},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lazic21a/lazic21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lazic21a.html},\n abstract = \t {In this work, we study algorithms for learning in infinite-horizon undiscounted Markov decision processes (MDPs) with function approximation. We first show that the regret analysis of the Politex algorithm (a version of regularized policy iteration) can be sharpened from $O(T^{3/4})$ to $O(\\sqrt{T})$ under nearly identical assumptions, and instantiate the bound with linear function approximation. 
Our result provides the first high-probability $O(\\sqrt{T})$ regret bound for a computationally efficient algorithm in this setting. The exact implementation of Politex with neural network function approximation is inefficient in terms of memory and computation. Since our analysis suggests that we need to approximate the average of the action-value functions of past policies well, we propose a simple efficient implementation where we train a single Q-function on a replay buffer with past data. We show that this often leads to superior performance over other implementation choices, especially in terms of wall-clock time. Our work also provides a novel theoretical justification for using experience replay within policy iteration algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v139/lazic21a/lazic21a.pdf", "supp": "", "pdf_size": 0, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4012792519229138923&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/lazic21a.html" }, { "title": "Improved Regret Bounds of Bilinear Bandits using Action Space Analysis", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10289", "id": "10289", "proceeding": "http://proceedings.mlr.press/v139/jang21a.html", "slides": "/media/icml-2021/Slides/10289.pdf", "author_site": "Kyoungseok Jang, Kwang-Sung Jun, Se-Young Yun, Wanmo Kang", "author": "Kyoungseok Jang; Kwang-Sung Jun; Se-Young Yun; Wanmo Kang", "abstract": "We consider the bilinear bandit problem where the learner chooses a pair of arms, each from two different action spaces of dimension $d_1$ and $d_2$, respectively. The learner then receives a reward whose expectation is a bilinear function of the two chosen arms with an unknown matrix parameter $\\Theta^*\\in\\mathbb{R}^{d_1 \\times d_2}$ with rank $r$. Despite abundant applications such as drug discovery, the optimal regret rate is unknown for this problem, though it was conjectured to be $\\tilde O(\\sqrt{d_1d_2(d_1+d_2)r T})$ by Jun et al. (2019) where $\\tilde O$ ignores polylogarithmic factors in $T$. In this paper, we make progress towards closing the gap between the upper and lower bound on the optimal regret. First, we reject the conjecture above by proposing algorithms that achieve the regret $\\tilde O(\\sqrt{d_1 d_2 (d_1+d_2) T})$ using the fact that the action space dimension $O(d_1+d_2)$ is significantly lower than the matrix parameter dimension $O(d_1 d_2)$. 
Second, we additionally devise an algorithm with better empirical performance than previous algorithms.", "bibtex": "@InProceedings{pmlr-v139-jang21a,\n title = \t {Improved Regret Bounds of Bilinear Bandits using Action Space Analysis},\n author = {Jang, Kyoungseok and Jun, Kwang-Sung and Yun, Se-Young and Kang, Wanmo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4744--4754},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jang21a/jang21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jang21a.html},\n abstract = \t {We consider the bilinear bandit problem where the learner chooses a pair of arms, each from two different action spaces of dimension $d_1$ and $d_2$, respectively. The learner then receives a reward whose expectation is a bilinear function of the two chosen arms with an unknown matrix parameter $\\Theta^*\\in\\mathbb{R}^{d_1 \\times d_2}$ with rank $r$. Despite abundant applications such as drug discovery, the optimal regret rate is unknown for this problem, though it was conjectured to be $\\tilde O(\\sqrt{d_1d_2(d_1+d_2)r T})$ by Jun et al. (2019) where $\\tilde O$ ignores polylogarithmic factors in $T$. In this paper, we make progress towards closing the gap between the upper and lower bound on the optimal regret. First, we reject the conjecture above by proposing algorithms that achieve the regret $\\tilde O(\\sqrt{d_1 d_2 (d_1+d_2) T})$ using the fact that the action space dimension $O(d_1+d_2)$ is significantly lower than the matrix parameter dimension $O(d_1 d_2)$. Second, we additionally devise an algorithm with better empirical performance than previous algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v139/jang21a/jang21a.pdf", "supp": "", "pdf_size": 456817, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13409850845926860898&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/jang21a.html" }, { "title": "Improved, Deterministic Smoothing for L_1 Certified Robustness", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9445", "id": "9445", "proceeding": "http://proceedings.mlr.press/v139/levine21a.html", "slides": "", "author_site": "Alexander Levine, Soheil Feizi", "author": "Alexander J Levine; Soheil Feizi", "abstract": "Randomized smoothing is a general technique for computing sample-dependent robustness guarantees against adversarial attacks for deep classifiers. Prior works on randomized smoothing against L_1 adversarial attacks use additive smoothing noise and provide probabilistic robustness guarantees. In this work, we propose a non-additive and deterministic smoothing method, Deterministic Smoothing with Splitting Noise (DSSN). To develop DSSN, we first develop SSN, a randomized method which involves generating each noisy smoothing sample by first randomly splitting the input space and then returning a representation of the center of the subdivision occupied by the input sample. In contrast to uniform additive smoothing, the SSN certification does not require the random noise components used to be independent. 
Thus, smoothing can be done effectively in just one dimension and can therefore be efficiently derandomized for quantized data (e.g., images). To the best of our knowledge, this is the first work to provide deterministic \"randomized smoothing\" for a norm-based adversarial threat model while allowing for an arbitrary classifier (i.e., a deep model) to be used as a base classifier and without requiring an exponential number of smoothing samples. On CIFAR-10 and ImageNet datasets, we provide substantially larger L_1 robustness certificates compared to prior works, establishing a new state-of-the-art. The determinism of our method also leads to significantly faster certificate computation. Code is available at: https://github.com/alevine0/smoothingSplittingNoise.", "bibtex": "@InProceedings{pmlr-v139-levine21a,\n title = \t {Improved, Deterministic Smoothing for L_1 Certified Robustness},\n author = {Levine, Alexander J and Feizi, Soheil},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6254--6264},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/levine21a/levine21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/levine21a.html},\n abstract = \t {Randomized smoothing is a general technique for computing sample-dependent robustness guarantees against adversarial attacks for deep classifiers. Prior works on randomized smoothing against L_1 adversarial attacks use additive smoothing noise and provide probabilistic robustness guarantees. In this work, we propose a non-additive and deterministic smoothing method, Deterministic Smoothing with Splitting Noise (DSSN). To develop DSSN, we first develop SSN, a randomized method which involves generating each noisy smoothing sample by first randomly splitting the input space and then returning a representation of the center of the subdivision occupied by the input sample. In contrast to uniform additive smoothing, the SSN certification does not require the random noise components used to be independent. Thus, smoothing can be done effectively in just one dimension and can therefore be efficiently derandomized for quantized data (e.g., images). To the best of our knowledge, this is the first work to provide deterministic \"randomized smoothing\" for a norm-based adversarial threat model while allowing for an arbitrary classifier (i.e., a deep model) to be used as a base classifier and without requiring an exponential number of smoothing samples. On CIFAR-10 and ImageNet datasets, we provide substantially larger L_1 robustness certificates compared to prior works, establishing a new state-of-the-art. The determinism of our method also leads to significantly faster certificate computation. 
Code is available at: https://github.com/alevine0/smoothingSplittingNoise.}\n}", "pdf": "http://proceedings.mlr.press/v139/levine21a/levine21a.pdf", "supp": "", "pdf_size": 1847895, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4413252390109069610&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, University of Maryland, College Park, Maryland, USA; Department of Computer Science, University of Maryland, College Park, Maryland, USA", "aff_domain": "cs.umd.edu; ", "email": "cs.umd.edu; ", "github": "https://github.com/alevine0/smoothingSplittingNoise", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/levine21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Maryland, College Park", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www/umd.edu", "aff_unique_abbr": "UMD", "aff_campus_unique_index": "0;0", "aff_campus_unique": "College Park", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Improving Breadth-Wise Backpropagation in Graph Neural Networks Helps Learning Long-Range Dependencies.", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9693", "id": "9693", "proceeding": "http://proceedings.mlr.press/v139/lukovnikov21a.html", "slides": "", "author_site": "Denis Lukovnikov, Asja Fischer", "author": "Denis Lukovnikov; Asja Fischer", "abstract": "In this work, we focus on the ability of graph neural networks (GNNs) to learn long-range patterns in graphs with edge features. Learning patterns that involve longer paths in the graph, requires using deeper GNNs. However, GNNs suffer from a drop in performance with increasing network depth. To improve the performance of deeper GNNs, previous works have investigated normalization techniques and various types of skip connections. While they are designed to improve depth-wise backpropagation between the representations of the same node in successive layers, they do not improve breadth-wise backpropagation between representations of neighbouring nodes. To analyse the consequences, we design synthetic datasets serving as a testbed for the ability of GNNs to learn long-range patterns. Our analysis shows that several commonly used GNN variants with only depth-wise skip connections indeed have problems learning long-range patterns. They are clearly outperformed by an attention-based GNN architecture that we propose for improving both depth- and breadth-wise backpropagation. We also verify that the presented architecture is competitive on real-world data.", "bibtex": "@InProceedings{pmlr-v139-lukovnikov21a,\n title = \t {Improving Breadth-Wise Backpropagation in Graph Neural Networks Helps Learning Long-Range Dependencies.},\n author = {Lukovnikov, Denis and Fischer, Asja},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7180--7191},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lukovnikov21a/lukovnikov21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lukovnikov21a.html},\n abstract = \t {In this work, we focus on the ability of graph neural networks (GNNs) to learn long-range patterns in graphs with edge features. 
Learning patterns that involve longer paths in the graph, requires using deeper GNNs. However, GNNs suffer from a drop in performance with increasing network depth. To improve the performance of deeper GNNs, previous works have investigated normalization techniques and various types of skip connections. While they are designed to improve depth-wise backpropagation between the representations of the same node in successive layers, they do not improve breadth-wise backpropagation between representations of neighbouring nodes. To analyse the consequences, we design synthetic datasets serving as a testbed for the ability of GNNs to learn long-range patterns. Our analysis shows that several commonly used GNN variants with only depth-wise skip connections indeed have problems learning long-range patterns. They are clearly outperformed by an attention-based GNN architecture that we propose for improving both depth- and breadth-wise backpropagation. We also verify that the presented architecture is competitive on real-world data.}\n}", "pdf": "http://proceedings.mlr.press/v139/lukovnikov21a/lukovnikov21a.pdf", "supp": "", "pdf_size": 375082, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2090106818419535670&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "Ruhr University Bochum; Ruhr University Bochum", "aff_domain": "rub.de;rub.de", "email": "rub.de;rub.de", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/lukovnikov21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Ruhr University Bochum", "aff_unique_dep": "", "aff_unique_url": "https://www.ruhr-uni-bochum.de", "aff_unique_abbr": "RUB", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "title": "Improving Generalization in Meta-learning via Task Augmentation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9091", "id": "9091", "proceeding": "http://proceedings.mlr.press/v139/yao21b.html", "slides": "", "author_site": "Huaxiu Yao, Long-Kai Huang, Linjun Zhang, Ying WEI, Li Tian, James Zou, Junzhou Huang, Zhenhui (Jessie) Li", "author": "Huaxiu Yao; Long-Kai Huang; Linjun Zhang; Ying Wei; Li Tian; James Zou; Junzhou Huang; Zhenhui () Li", "abstract": "Meta-learning has proven to be a powerful paradigm for transferring the knowledge from previous tasks to facilitate the learning of a novel task. Current dominant algorithms train a well-generalized model initialization which is adapted to each task via the support set. The crux lies in optimizing the generalization capability of the initialization, which is measured by the performance of the adapted model on the query set of each task. Unfortunately, this generalization measure, evidenced by empirical results, pushes the initialization to overfit the meta-training tasks, which significantly impairs the generalization and adaptation to novel tasks. To address this issue, we actively augment a meta-training task with \u201cmore data\u201d when evaluating the generalization. Concretely, we propose two task augmentation methods, including MetaMix and Channel Shuffle. MetaMix linearly combines features and labels of samples from both the support and query sets. For each class of samples, Channel Shuffle randomly replaces a subset of their channels with the corresponding ones from a different class. Theoretical studies show how task augmentation improves the generalization of meta-learning. 
Moreover, both MetaMix and Channel Shuffle outperform state-of-the-art results by a large margin across many datasets and are compatible with existing meta-learning algorithms.", "bibtex": "@InProceedings{pmlr-v139-yao21b,\n title = \t {Improving Generalization in Meta-learning via Task Augmentation},\n author = {Yao, Huaxiu and Huang, Long-Kai and Zhang, Linjun and Wei, Ying and Tian, Li and Zou, James and Huang, Junzhou and Li, Zhenhui ()},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11887--11897},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yao21b/yao21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/yao21b.html},\n abstract = \t {Meta-learning has proven to be a powerful paradigm for transferring the knowledge from previous tasks to facilitate the learning of a novel task. Current dominant algorithms train a well-generalized model initialization which is adapted to each task via the support set. The crux lies in optimizing the generalization capability of the initialization, which is measured by the performance of the adapted model on the query set of each task. Unfortunately, this generalization measure, evidenced by empirical results, pushes the initialization to overfit the meta-training tasks, which significantly impairs the generalization and adaptation to novel tasks. To address this issue, we actively augment a meta-training task with \u201cmore data\u201d when evaluating the generalization. Concretely, we propose two task augmentation methods, including MetaMix and Channel Shuffle. MetaMix linearly combines features and labels of samples from both the support and query sets. For each class of samples, Channel Shuffle randomly replaces a subset of their channels with the corresponding ones from a different class. Theoretical studies show how task augmentation improves the generalization of meta-learning. 
Moreover, both MetaMix and Channel Shuffle outperform state-of-the-art results by a large margin across many datasets and are compatible with existing meta-learning algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v139/yao21b/yao21b.pdf", "supp": "", "pdf_size": 668745, "gs_citation": 104, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=756197262814969387&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Stanford University, CA, USA; Tencent AI Lab, Shenzhen, China; Rutgers University, NJ, USA; City University of Hong Kong, Hong Kong; Tencent AI Lab, Shenzhen, China; Stanford University, CA, USA; Tencent AI Lab, Shenzhen, China; Pennsylvania State University, PA, USA", "aff_domain": "cityu.edu.hk; ; ; ; ; ; ;", "email": "cityu.edu.hk; ; ; ; ; ; ;", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/yao21b.html", "aff_unique_index": "0;1;2;3;1;0;1;4", "aff_unique_norm": "Stanford University;Tencent;Rutgers University;City University of Hong Kong;Pennsylvania State University", "aff_unique_dep": ";AI Lab;;;", "aff_unique_url": "https://www.stanford.edu;https://ai.tencent.com;https://www.rutgers.edu;https://www.cityu.edu.hk;https://www.psu.edu", "aff_unique_abbr": "Stanford;Tencent AI Lab;Rutgers;CityU;PSU", "aff_campus_unique_index": "0;1;2;3;1;0;1;4", "aff_campus_unique": "California;Shenzhen;New Brunswick;Hong Kong SAR;University Park", "aff_country_unique_index": "0;1;0;1;1;0;1;0", "aff_country_unique": "United States;China" }, { "title": "Improving Gradient Regularization using Complex-Valued Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10759", "id": "10759", "proceeding": "http://proceedings.mlr.press/v139/yeats21a.html", "slides": "/media/icml-2021/Slides/10759.pdf", "author_site": "Eric Yeats, Yiran Chen, Hai Li", "author": "Eric C Yeats; Yiran Chen; Hai Li", "abstract": "Gradient regularization is a neural network defense technique that requires no prior knowledge of an adversarial attack and that brings only limited increase in training computational complexity. A form of complex-valued neural network (CVNN) is proposed to improve the performance of gradient regularization on classification tasks of real-valued input in adversarial settings. The activation derivatives of each layer of the CVNN are dependent on the combination of inputs to the layer, and locally stable representations can be learned for inputs the network is trained on. Furthermore, the properties of the CVNN parameter derivatives resist decrease of performance on the standard objective that is caused by competition with the gradient regularization objective. Experimental results show that the performance of gradient regularized CVNN surpasses that of real-valued neural networks with comparable storage and computational complexity. 
Moreover, gradient regularized complex-valued networks exhibit robust performance approaching that of real-valued networks trained with multi-step adversarial training.", "bibtex": "@InProceedings{pmlr-v139-yeats21a,\n title = \t {Improving Gradient Regularization using Complex-Valued Neural Networks},\n author = {Yeats, Eric C and Chen, Yiran and Li, Hai},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11953--11963},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yeats21a/yeats21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yeats21a.html},\n abstract = \t {Gradient regularization is a neural network defense technique that requires no prior knowledge of an adversarial attack and that brings only limited increase in training computational complexity. A form of complex-valued neural network (CVNN) is proposed to improve the performance of gradient regularization on classification tasks of real-valued input in adversarial settings. The activation derivatives of each layer of the CVNN are dependent on the combination of inputs to the layer, and locally stable representations can be learned for inputs the network is trained on. Furthermore, the properties of the CVNN parameter derivatives resist decrease of performance on the standard objective that is caused by competition with the gradient regularization objective. Experimental results show that the performance of gradient regularized CVNN surpasses that of real-valued neural networks with comparable storage and computational complexity. Moreover, gradient regularized complex-valued networks exhibit robust performance approaching that of real-valued networks trained with multi-step adversarial training.}\n}", "pdf": "http://proceedings.mlr.press/v139/yeats21a/yeats21a.pdf", "supp": "", "pdf_size": 2550263, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8760540912907236396&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "ECE Dept., Duke University; ECE Dept., Duke University; ECE Dept., Duke University", "aff_domain": "duke.edu; ; ", "email": "duke.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/yeats21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Duke University", "aff_unique_dep": "Electrical and Computer Engineering", "aff_unique_url": "https://www.duke.edu", "aff_unique_abbr": "Duke", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Improving Lossless Compression Rates via Monte Carlo Bits-Back Coding", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10517", "id": "10517", "proceeding": "http://proceedings.mlr.press/v139/ruan21a.html", "slides": "/media/icml-2021/Slides/10517.pdf", "author_site": "Yangjun Ruan, Karen Ullrich, Daniel Severo, James Townsend, Ashish Khisti, Arnaud Doucet, Alireza Makhzani, Chris Maddison", "author": "Yangjun Ruan; Karen Ullrich; Daniel S Severo; James Townsend; Ashish Khisti; Arnaud Doucet; Alireza Makhzani; Chris Maddison", "abstract": "Latent variable models have been successfully applied in lossless compression with the bits-back coding algorithm. 
However, bits-back suffers from an increase in the bitrate equal to the KL divergence between the approximate posterior and the true posterior. In this paper, we show how to remove this gap asymptotically by deriving bits-back coding algorithms from tighter variational bounds. The key idea is to exploit extended space representations of Monte Carlo estimators of the marginal likelihood. Naively applied, our schemes would require more initial bits than the standard bits-back coder, but we show how to drastically reduce this additional cost with couplings in the latent space. When parallel architectures can be exploited, our coders can achieve better rates than bits-back with little additional cost. We demonstrate improved lossless compression rates in a variety of settings, especially in out-of-distribution or sequential data compression.", "bibtex": "@InProceedings{pmlr-v139-ruan21a,\n title = \t {Improving Lossless Compression Rates via Monte Carlo Bits-Back Coding},\n author = {Ruan, Yangjun and Ullrich, Karen and Severo, Daniel S and Townsend, James and Khisti, Ashish and Doucet, Arnaud and Makhzani, Alireza and Maddison, Chris},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9136--9147},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ruan21a/ruan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ruan21a.html},\n abstract = \t {Latent variable models have been successfully applied in lossless compression with the bits-back coding algorithm. However, bits-back suffers from an increase in the bitrate equal to the KL divergence between the approximate posterior and the true posterior. In this paper, we show how to remove this gap asymptotically by deriving bits-back coding algorithms from tighter variational bounds. The key idea is to exploit extended space representations of Monte Carlo estimators of the marginal likelihood. Naively applied, our schemes would require more initial bits than the standard bits-back coder, but we show how to drastically reduce this additional cost with couplings in the latent space. When parallel architectures can be exploited, our coders can achieve better rates than bits-back with little additional cost. 
We demonstrate improved lossless compression rates in a variety of settings, especially in out-of-distribution or sequential data compression.}\n}", "pdf": "http://proceedings.mlr.press/v139/ruan21a/ruan21a.pdf", "supp": "", "pdf_size": 1495877, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1052321349567422387&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": ";;;;;;;", "aff_domain": ";;;;;;;", "email": ";;;;;;;", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/ruan21a.html" }, { "title": "Improving Molecular Graph Neural Network Explainability with Orthonormalization and Induced Sparsity", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10323", "id": "10323", "proceeding": "http://proceedings.mlr.press/v139/henderson21a.html", "slides": "", "author_site": "Ryan Henderson, Djork-Arn\u00e9 Clevert, Floriane Montanari", "author": "Ryan Henderson; Djork-Arn\u00e9 Clevert; Floriane Montanari", "abstract": "Rationalizing which parts of a molecule drive the predictions of a molecular graph convolutional neural network (GCNN) can be difficult. To help, we propose two simple regularization techniques to apply during the training of GCNNs: Batch Representation Orthonormalization (BRO) and Gini regularization. BRO, inspired by molecular orbital theory, encourages graph convolution operations to generate orthonormal node embeddings. Gini regularization is applied to the weights of the output layer and constrains the number of dimensions the model can use to make predictions. We show that Gini and BRO regularization can improve the accuracy of state-of-the-art GCNN attribution methods on artificial benchmark datasets. In a real-world setting, we demonstrate that medicinal chemists significantly prefer explanations extracted from regularized models. While we only study these regularizers in the context of GCNNs, both can be applied to other types of neural networks.", "bibtex": "@InProceedings{pmlr-v139-henderson21a,\n title = \t {Improving Molecular Graph Neural Network Explainability with Orthonormalization and Induced Sparsity},\n author = {Henderson, Ryan and Clevert, Djork-Arn{\\'e} and Montanari, Floriane},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4203--4213},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/henderson21a/henderson21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/henderson21a.html},\n abstract = \t {Rationalizing which parts of a molecule drive the predictions of a molecular graph convolutional neural network (GCNN) can be difficult. To help, we propose two simple regularization techniques to apply during the training of GCNNs: Batch Representation Orthonormalization (BRO) and Gini regularization. BRO, inspired by molecular orbital theory, encourages graph convolution operations to generate orthonormal node embeddings. Gini regularization is applied to the weights of the output layer and constrains the number of dimensions the model can use to make predictions. We show that Gini and BRO regularization can improve the accuracy of state-of-the-art GCNN attribution methods on artificial benchmark datasets. 
In a real-world setting, we demonstrate that medicinal chemists significantly prefer explanations extracted from regularized models. While we only study these regularizers in the context of GCNNs, both can be applied to other types of neural networks.}\n}", "pdf": "http://proceedings.mlr.press/v139/henderson21a/henderson21a.pdf", "supp": "", "pdf_size": 2138122, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2317141663535501848&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Digital Technologies, Bayer AG, Berlin, Germany; Digital Technologies, Bayer AG, Berlin, Germany; Digital Technologies, Bayer AG, Berlin, Germany", "aff_domain": "bayer.com; ;bayer.com", "email": "bayer.com; ;bayer.com", "github": "", "project": "https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#functional", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/henderson21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Bayer AG", "aff_unique_dep": "Digital Technologies", "aff_unique_url": "https://www.bayer.com", "aff_unique_abbr": "Bayer", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Berlin", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Germany" }, { "title": "Improving Predictors via Combination Across Diverse Task Categories", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10419", "id": "10419", "proceeding": "http://proceedings.mlr.press/v139/kim21a.html", "slides": "/media/icml-2021/Slides/10419.pdf", "author": "Kwang In Kim", "abstract": "Predictor combination is the problem of improving a task predictor using predictors of other tasks when the forms of individual predictors are unknown. Previous work approached this problem by nonparametrically assessing predictor relationships based on their joint evaluations on a shared sample. This limits their application to cases where all predictors are defined on the same task category, e.g. all predictors estimate attributes of shoes. We present a new predictor combination algorithm that overcomes this limitation. Our algorithm aligns the heterogeneous domains of different predictors in a shared latent space to facilitate comparisons of predictors independently of the domains on which they are originally defined. We facilitate this by a new data alignment scheme that matches data distributions across task categories. Based on visual attribute ranking experiments on datasets that span diverse task categories (e.g. shoes and animals), we demonstrate that our approach often significantly improves the performances of the initial predictors.", "bibtex": "@InProceedings{pmlr-v139-kim21a,\n title = \t {Improving Predictors via Combination Across Diverse Task Categories},\n author = {Kim, Kwang In},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5475--5485},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kim21a/kim21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kim21a.html},\n abstract = \t {Predictor combination is the problem of improving a task predictor using predictors of other tasks when the forms of individual predictors are unknown. 
Previous work approached this problem by nonparametrically assessing predictor relationships based on their joint evaluations on a shared sample. This limits their application to cases where all predictors are defined on the same task category, e.g. all predictors estimate attributes of shoes. We present a new predictor combination algorithm that overcomes this limitation. Our algorithm aligns the heterogeneous domains of different predictors in a shared latent space to facilitate comparisons of predictors independently of the domains on which they are originally defined. We facilitate this by a new data alignment scheme that matches data distributions across task categories. Based on visual attribute ranking experiments on datasets that span diverse task categories (e.g. shoes and animals), we demonstrate that our approach often significantly improves the performances of the initial predictors.}\n}", "pdf": "http://proceedings.mlr.press/v139/kim21a/kim21a.pdf", "supp": "", "pdf_size": 507347, "gs_citation": 0, "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:ffn60EgxUIkJ:scholar.google.com/&scioq=Improving+Predictors+via+Combination+Across+Diverse+Task+Categories&hl=en&as_sdt=0,5", "gs_version_total": 4, "aff": "UNIST, Ulsan, Korea", "aff_domain": "unist.ac.kr", "email": "unist.ac.kr", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v139/kim21a.html", "aff_unique_index": "0", "aff_unique_norm": "Ulsan National Institute of Science and Technology", "aff_unique_dep": "", "aff_unique_url": "https://www.unist.ac.kr", "aff_unique_abbr": "UNIST", "aff_campus_unique_index": "0", "aff_campus_unique": "Ulsan", "aff_country_unique_index": "0", "aff_country_unique": "South Korea" }, { "title": "Improving Ultrametrics Embeddings Through Coresets", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8775", "id": "8775", "proceeding": "http://proceedings.mlr.press/v139/cohen-addad21a.html", "slides": "", "author_site": "Vincent Cohen-Addad, R\u00e9mi de Joannis de Verclos, Guillaume Lagarde", "author": "Vincent Cohen-Addad; R\u00e9mi De Joannis De Verclos; Guillaume Lagarde", "abstract": "To tackle the curse of dimensionality in data analysis and unsupervised learning, it is critical to be able to efficiently compute \u201csimple\u201d faithful representations of the data that helps extract information, improves understanding and visualization of the structure. When the dataset consists of $d$-dimensional vectors, simple representations of the data may consist in trees or ultrametrics, and the goal is to best preserve the distances (i.e.: dissimilarity values) between data elements. To circumvent the quadratic running times of the most popular methods for fitting ultrametrics, such as average, single, or complete linkage,\u00a0\\citet{CKL20} recently presented a new algorithm that for any $c \\ge 1$, outputs in time $n^{1+O(1/c^2)}$ an ultrametric $\\Delta$ such that for any two points $u, v$, $\\Delta(u, v)$ is within a multiplicative factor of $5c$ to the distance between $u$ and $v$ in the \u201cbest\u201d ultrametric representation. We improve the above result and show how to improve the above guarantee from $5c$ to $\\sqrt{2}c + \\varepsilon$ while achieving the same asymptotic running time. 
To complement the improved theoretical bound, we additionally show that the performances of our algorithm are significantly better for various real-world datasets.", "bibtex": "@InProceedings{pmlr-v139-cohen-addad21a,\n title = \t {Improving Ultrametrics Embeddings Through Coresets},\n author = {Cohen-Addad, Vincent and De Joannis De Verclos, R{\\'e}mi and Lagarde, Guillaume},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2060--2068},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cohen-addad21a/cohen-addad21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/cohen-addad21a.html},\n abstract = \t {To tackle the curse of dimensionality in data analysis and unsupervised learning, it is critical to be able to efficiently compute \u201csimple\u201d faithful representations of the data that helps extract information, improves understanding and visualization of the structure. When the dataset consists of $d$-dimensional vectors, simple representations of the data may consist in trees or ultrametrics, and the goal is to best preserve the distances (i.e.: dissimilarity values) between data elements. To circumvent the quadratic running times of the most popular methods for fitting ultrametrics, such as average, single, or complete linkage,\u00a0\\citet{CKL20} recently presented a new algorithm that for any $c \\ge 1$, outputs in time $n^{1+O(1/c^2)}$ an ultrametric $\\Delta$ such that for any two points $u, v$, $\\Delta(u, v)$ is within a multiplicative factor of $5c$ to the distance between $u$ and $v$ in the \u201cbest\u201d ultrametric representation. We improve the above result and show how to improve the above guarantee from $5c$ to $\\sqrt{2}c + \\varepsilon$ while achieving the same asymptotic running time. 
To complement the improved theoretical bound, we additionally show that the performances of our algorithm are significantly better for various real-world datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/cohen-addad21a/cohen-addad21a.pdf", "supp": "", "pdf_size": 310725, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=801008666692715992&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "Google Research, Zurich; ENS Lyon; LaBRI, CNRS", "aff_domain": "google.com;ens-lyon.org;labri.fr", "email": "google.com;ens-lyon.org;labri.fr", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/cohen-addad21a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Google;Ecole Normale Sup\u00e9rieure de Lyon;CNRS", "aff_unique_dep": "Google Research;;LaBRI", "aff_unique_url": "https://research.google;https://www.ens-lyon.fr;https://www.cnrs.fr", "aff_unique_abbr": "Google;ENS Lyon;CNRS", "aff_campus_unique_index": "0", "aff_campus_unique": "Zurich;", "aff_country_unique_index": "0;1;1", "aff_country_unique": "Switzerland;France" }, { "title": "In-Database Regression in Input Sparsity Time", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8647", "id": "8647", "proceeding": "http://proceedings.mlr.press/v139/jayaram21a.html", "slides": "/media/icml-2021/Slides/8647.pdf", "author_site": "Rajesh Jayaram, Alireza Samadian, David Woodruff, Peng Ye", "author": "Rajesh Jayaram; Alireza Samadian; David Woodruff; Peng Ye", "abstract": "Sketching is a powerful dimensionality reduction technique for accelerating algorithms for data analysis. A crucial step in sketching methods is to compute a subspace embedding (SE) for a large matrix $A \\in \\mathbb{R}^{N \\times d}$. SE\u2019s are the primary tool for obtaining extremely efficient solutions for many linear-algebraic tasks, such as least squares regression and low rank approximation. Computing an SE often requires an explicit representation of $A$ and running time proportional to the size of $A$. However, if $A= T_1 \\Join T_2 \\Join \u2026\\Join T_m$ is the result of a database join query on several smaller tables $T_i \\in \\mathbb{R}^{n_i \\times d_i}$, then this running time can be prohibitive, as $A$ itself can have as many as $O(n_1 n_2 \\cdots n_m)$ rows. In this work, we design subspace embeddings for database joins which can be computed significantly faster than computing the join. For the case of a two table join $A = T_1 \\Join T_2$ we give input-sparsity algorithms for computing subspace embeddings, with running time bounded by the number of non-zero entries in $T_1,T_2$. This results in input-sparsity time algorithms for high accuracy regression, significantly improving upon the running time of prior FAQ-based methods for regression. We extend our results to arbitrary joins for the ridge regression problem, also considerably improving the running time of prior methods. 
Empirically, we apply our method to real datasets and show that it is significantly faster than existing algorithms.", "bibtex": "@InProceedings{pmlr-v139-jayaram21a,\n title = \t {In-Database Regression in Input Sparsity Time},\n author = {Jayaram, Rajesh and Samadian, Alireza and Woodruff, David and Ye, Peng},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4797--4806},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jayaram21a/jayaram21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jayaram21a.html},\n abstract = \t {Sketching is a powerful dimensionality reduction technique for accelerating algorithms for data analysis. A crucial step in sketching methods is to compute a subspace embedding (SE) for a large matrix $A \\in \\mathbb{R}^{N \\times d}$. SE\u2019s are the primary tool for obtaining extremely efficient solutions for many linear-algebraic tasks, such as least squares regression and low rank approximation. Computing an SE often requires an explicit representation of $A$ and running time proportional to the size of $A$. However, if $A= T_1 \\Join T_2 \\Join \u2026\\Join T_m$ is the result of a database join query on several smaller tables $T_i \\in \\mathbb{R}^{n_i \\times d_i}$, then this running time can be prohibitive, as $A$ itself can have as many as $O(n_1 n_2 \\cdots n_m)$ rows. In this work, we design subspace embeddings for database joins which can be computed significantly faster than computing the join. For the case of a two table join $A = T_1 \\Join T_2$ we give input-sparsity algorithms for computing subspace embeddings, with running time bounded by the number of non-zero entries in $T_1,T_2$. This results in input-sparsity time algorithms for high accuracy regression, significantly improving upon the running time of prior FAQ-based methods for regression. We extend our results to arbitrary joins for the ridge regression problem, also considerably improving the running time of prior methods. 
Empirically, we apply our method to real datasets and show that it is significantly faster than existing algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v139/jayaram21a/jayaram21a.pdf", "supp": "", "pdf_size": 383279, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4719057238276619749&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Computer Science Department, Carnegie Mellon University, Pittsburgh PA, United States; Department of Computer Science, University of Pittsburgh, Pittsburgh PA, United States; Computer Science Department, Carnegie Mellon University, Pittsburgh PA, United States; Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China", "aff_domain": "cs.cmu.edu; ;cs.cmu.edu; ", "email": "cs.cmu.edu; ;cs.cmu.edu; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/jayaram21a.html", "aff_unique_index": "0;1;0;2", "aff_unique_norm": "Carnegie Mellon University;University of Pittsburgh;Tsinghua University", "aff_unique_dep": "Computer Science Department;Department of Computer Science;Institute for Interdisciplinary Information Sciences", "aff_unique_url": "https://www.cmu.edu;https://www.pitt.edu;https://www.tsinghua.edu.cn", "aff_unique_abbr": "CMU;Pitt;Tsinghua", "aff_campus_unique_index": "0;0;0;1", "aff_campus_unique": "Pittsburgh;Beijing", "aff_country_unique_index": "0;0;0;1", "aff_country_unique": "United States;China" }, { "title": "Incentivized Bandit Learning with Self-Reinforcing User Preferences", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8731", "id": "8731", "proceeding": "http://proceedings.mlr.press/v139/zhou21d.html", "slides": "/media/icml-2021/Slides/8731.pdf", "author_site": "Tianchen Zhou, Jia Liu, Chaosheng Dong, jingyuan deng", "author": "Tianchen Zhou; Jia Liu; Chaosheng Dong; Jingyuan Deng", "abstract": "In this paper, we investigate a new multi-armed bandit (MAB) online learning model that considers real-world phenomena in many recommender systems: (i) the learning agent cannot pull the arms by itself and thus has to offer rewards to users to incentivize arm-pulling indirectly; and (ii) if users with specific arm preferences are well rewarded, they induce a \"self-reinforcing\" effect in the sense that they will attract more users of similar arm preferences. Besides addressing the tradeoff of exploration and exploitation, another key feature of this new MAB model is to balance reward and incentivizing payment. The goal of the agent is to maximize the total reward over a fixed time horizon $T$ with a low total payment. Our contributions in this paper are two-fold: (i) We propose a new MAB model with random arm selection that considers the relationship of users\u2019 self-reinforcing preferences and incentives; and (ii) We leverage the properties of a multi-color Polya urn with nonlinear feedback model to propose two MAB policies termed \"At-Least-$n$ Explore-Then-Commit\" and \"UCB-List\". We prove that both policies achieve $O(log T)$ expected regret with $O(log T)$ expected payment over a time horizon $T$. 
We conduct numerical simulations to demonstrate and verify the performances of these two policies and study their robustness under various settings.", "bibtex": "@InProceedings{pmlr-v139-zhou21d,\n title = \t {Incentivized Bandit Learning with Self-Reinforcing User Preferences},\n author = {Zhou, Tianchen and Liu, Jia and Dong, Chaosheng and Deng, Jingyuan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12824--12834},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhou21d/zhou21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhou21d.html},\n abstract = \t {In this paper, we investigate a new multi-armed bandit (MAB) online learning model that considers real-world phenomena in many recommender systems: (i) the learning agent cannot pull the arms by itself and thus has to offer rewards to users to incentivize arm-pulling indirectly; and (ii) if users with specific arm preferences are well rewarded, they induce a \"self-reinforcing\" effect in the sense that they will attract more users of similar arm preferences. Besides addressing the tradeoff of exploration and exploitation, another key feature of this new MAB model is to balance reward and incentivizing payment. The goal of the agent is to maximize the total reward over a fixed time horizon $T$ with a low total payment. Our contributions in this paper are two-fold: (i) We propose a new MAB model with random arm selection that considers the relationship of users\u2019 self-reinforcing preferences and incentives; and (ii) We leverage the properties of a multi-color Polya urn with nonlinear feedback model to propose two MAB policies termed \"At-Least-$n$ Explore-Then-Commit\" and \"UCB-List\". We prove that both policies achieve $O(log T)$ expected regret with $O(log T)$ expected payment over a time horizon $T$. 
We conduct numerical simulations to demonstrate and verify the performances of these two policies and study their robustness under various settings.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhou21d/zhou21d.pdf", "supp": "", "pdf_size": 656988, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4800992229070719006&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Electrical and Computer Engineering, The Ohio State University, Columbus, Ohio, USA; Department of Electrical and Computer Engineering, The Ohio State University, Columbus, Ohio, USA; Amazon, Seattle, Washington, USA; Amazon, Seattle, Washington, USA", "aff_domain": "osu.edu;ece.osu.edu; ; ", "email": "osu.edu;ece.osu.edu; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/zhou21d.html", "aff_unique_index": "0;0;1;1", "aff_unique_norm": "Ohio State University;Amazon", "aff_unique_dep": "Department of Electrical and Computer Engineering;Amazon", "aff_unique_url": "https://www.osu.edu;https://www.amazon.com", "aff_unique_abbr": "OSU;Amazon", "aff_campus_unique_index": "0;0;1;1", "aff_campus_unique": "Columbus;Seattle", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Incentivizing Compliance with Algorithmic Instruments", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10359", "id": "10359", "proceeding": "http://proceedings.mlr.press/v139/ngo21a.html", "slides": "", "author_site": "Dung Ngo, Logan Stapleton, Vasilis Syrgkanis, Steven Wu", "author": "Dung Daniel T Ngo; Logan Stapleton; Vasilis Syrgkanis; Steven Wu", "abstract": "Randomized experiments can be susceptible to selection bias due to potential non-compliance by the participants. While much of the existing work has studied compliance as a static behavior, we propose a game-theoretic model to study compliance as dynamic behavior that may change over time. In rounds, a social planner interacts with a sequence of heterogeneous agents who arrive with their unobserved private type that determines both their prior preferences across the actions (e.g., control and treatment) and their baseline rewards without taking any treatment. The planner provides each agent with a randomized recommendation that may alter their beliefs and their action selection. We develop a novel recommendation mechanism that views the planner\u2019s recommendation as a form of instrumental variable (IV) that only affects an agents\u2019 action selection, but not the observed rewards. We construct such IVs by carefully mapping the history \u2013the interactions between the planner and the previous agents\u2013 to a random recommendation. 
Even though the initial agents may be completely non-compliant, our mechanism can incentivize compliance over time, thereby enabling the estimation of the treatment effect of each treatment, and minimizing the cumulative regret of the planner whose goal is to identify the optimal treatment.", "bibtex": "@InProceedings{pmlr-v139-ngo21a,\n title = \t {Incentivizing Compliance with Algorithmic Instruments},\n author = {Ngo, Dung Daniel T and Stapleton, Logan and Syrgkanis, Vasilis and Wu, Steven},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8045--8055},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ngo21a/ngo21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ngo21a.html},\n abstract = \t {Randomized experiments can be susceptible to selection bias due to potential non-compliance by the participants. While much of the existing work has studied compliance as a static behavior, we propose a game-theoretic model to study compliance as dynamic behavior that may change over time. In rounds, a social planner interacts with a sequence of heterogeneous agents who arrive with their unobserved private type that determines both their prior preferences across the actions (e.g., control and treatment) and their baseline rewards without taking any treatment. The planner provides each agent with a randomized recommendation that may alter their beliefs and their action selection. We develop a novel recommendation mechanism that views the planner\u2019s recommendation as a form of instrumental variable (IV) that only affects an agents\u2019 action selection, but not the observed rewards. We construct such IVs by carefully mapping the history \u2013the interactions between the planner and the previous agents\u2013 to a random recommendation. 
Even though the initial agents may be completely non-compliant, our mechanism can incentivize compliance over time, thereby enabling the estimation of the treatment effect of each treatment, and minimizing the cumulative regret of the planner whose goal is to identify the optimal treatment.}\n}", "pdf": "http://proceedings.mlr.press/v139/ngo21a/ngo21a.pdf", "supp": "", "pdf_size": 475178, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8032953671879607459&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "University of Minnesota; University of Minnesota; Microsoft Research; Carnegie Mellon University", "aff_domain": "gmail.com; ; ; ", "email": "gmail.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/ngo21a.html", "aff_unique_index": "0;0;1;2", "aff_unique_norm": "University of Minnesota;Microsoft;Carnegie Mellon University", "aff_unique_dep": ";Microsoft Research;", "aff_unique_url": "https://www.minnesota.edu;https://www.microsoft.com/en-us/research;https://www.cmu.edu", "aff_unique_abbr": "UMN;MSR;CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Inference for Network Regression Models with Community Structure", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9945", "id": "9945", "proceeding": "http://proceedings.mlr.press/v139/pan21a.html", "slides": "", "author_site": "Mengjie Pan, Tyler Mccormick, Bailey Fosdick", "author": "Mengjie Pan; Tyler Mccormick; Bailey Fosdick", "abstract": "Network regression models, where the outcome comprises the valued edge in a network and the predictors are actor or dyad-level covariates, are used extensively in the social and biological sciences. Valid inference relies on accurately modeling the residual dependencies among the relations. Frequently homogeneity assumptions are placed on the errors which are commonly incorrect and ignore critical natural clustering of the actors. In this work, we present a novel regression modeling framework that models the errors as resulting from a community-based dependence structure and exploits the subsequent exchangeability properties of the error distribution to obtain parsimonious standard errors for regression parameters.", "bibtex": "@InProceedings{pmlr-v139-pan21a,\n title = \t {Inference for Network Regression Models with Community Structure},\n author = {Pan, Mengjie and Mccormick, Tyler and Fosdick, Bailey},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8349--8358},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/pan21a/pan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/pan21a.html},\n abstract = \t {Network regression models, where the outcome comprises the valued edge in a network and the predictors are actor or dyad-level covariates, are used extensively in the social and biological sciences. Valid inference relies on accurately modeling the residual dependencies among the relations. Frequently homogeneity assumptions are placed on the errors which are commonly incorrect and ignore critical natural clustering of the actors. 
In this work, we present a novel regression modeling framework that models the errors as resulting from a community-based dependence structure and exploits the subsequent exchangeability properties of the error distribution to obtain parsimonious standard errors for regression parameters.}\n}", "pdf": "http://proceedings.mlr.press/v139/pan21a/pan21a.pdf", "supp": "", "pdf_size": 957101, "gs_citation": 0, "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:FFHe68ZUG9MJ:scholar.google.com/&scioq=Inference+for+Network+Regression+Models+with+Community+Structure&hl=en&as_sdt=0,5", "gs_version_total": 5, "aff": "Facebook, Seattle, Washington, USA; Department of Statistics and Department of Sociology, University of Washington, Seattle, Washington, USA; Department of Statistics, Colorado State University, Fort Collins, Colorado, USA", "aff_domain": "gmail.com;uw.edu;colostate.edu", "email": "gmail.com;uw.edu;colostate.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/pan21a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Meta;University of Washington;Colorado State University", "aff_unique_dep": "Facebook;Department of Statistics;Department of Statistics", "aff_unique_url": "https://www.facebook.com;https://www.washington.edu;https://www.colostate.edu", "aff_unique_abbr": "FB;UW;CSU", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Seattle;Fort Collins", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Inferring Latent Dynamics Underlying Neural Population Activity via Neural Differential Equations", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9219", "id": "9219", "proceeding": "http://proceedings.mlr.press/v139/kim21h.html", "slides": "", "author_site": "Timothy Kim, Thomas Luo, Jonathan Pillow, Carlos Brody", "author": "Timothy D. Kim; Thomas Z. Luo; Jonathan W. Pillow; Carlos D. Brody", "abstract": "An important problem in systems neuroscience is to identify the latent dynamics underlying neural population activity. Here we address this problem by introducing a low-dimensional nonlinear model for latent neural population dynamics using neural ordinary differential equations (neural ODEs), with noisy sensory inputs and Poisson spike train outputs. We refer to this as the Poisson Latent Neural Differential Equations (PLNDE) model. We apply the PLNDE framework to a variety of synthetic datasets, and show that it accurately infers the phase portraits and fixed points of nonlinear systems augmented to produce spike train data, including the FitzHugh-Nagumo oscillator, a 3-dimensional nonlinear spiral, and a nonlinear sensory decision-making model with attractor dynamics. Our model significantly outperforms existing methods at inferring single-trial neural firing rates and the corresponding latent trajectories that generated them, especially in the regime where the spike counts and number of trials are low. We then apply our model to multi-region neural population recordings from medial frontal cortex of rats performing an auditory decision-making task. Our model provides a general, interpretable framework for investigating the neural mechanisms of decision-making and other cognitive computations through the lens of dynamical systems.", "bibtex": "@InProceedings{pmlr-v139-kim21h,\n title = \t {Inferring Latent Dynamics Underlying Neural Population Activity via Neural Differential Equations},\n author = {Kim, Timothy D. and Luo, Thomas Z. 
and Pillow, Jonathan W. and Brody, Carlos D.},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5551--5561},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kim21h/kim21h.pdf},\n url = \t {https://proceedings.mlr.press/v139/kim21h.html},\n abstract = \t {An important problem in systems neuroscience is to identify the latent dynamics underlying neural population activity. Here we address this problem by introducing a low-dimensional nonlinear model for latent neural population dynamics using neural ordinary differential equations (neural ODEs), with noisy sensory inputs and Poisson spike train outputs. We refer to this as the Poisson Latent Neural Differential Equations (PLNDE) model. We apply the PLNDE framework to a variety of synthetic datasets, and show that it accurately infers the phase portraits and fixed points of nonlinear systems augmented to produce spike train data, including the FitzHugh-Nagumo oscillator, a 3-dimensional nonlinear spiral, and a nonlinear sensory decision-making model with attractor dynamics. Our model significantly outperforms existing methods at inferring single-trial neural firing rates and the corresponding latent trajectories that generated them, especially in the regime where the spike counts and number of trials are low. We then apply our model to multi-region neural population recordings from medial frontal cortex of rats performing an auditory decision-making task. Our model provides a general, interpretable framework for investigating the neural mechanisms of decision-making and other cognitive computations through the lens of dynamical systems.}\n}", "pdf": "http://proceedings.mlr.press/v139/kim21h/kim21h.pdf", "supp": "", "pdf_size": 3178662, "gs_citation": 60, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14222099579908293875&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Princeton Neuroscience Institute, Princeton, New Jersey; Princeton Neuroscience Institute, Princeton, New Jersey; Princeton Neuroscience Institute, Princeton, New Jersey + Department of Psychology, Princeton University, Princeton, New Jersey; Princeton Neuroscience Institute, Princeton, New Jersey + Department of Psychology, Princeton University, Princeton, New Jersey + Howard Hughes Medical Institute, Princeton University, Princeton, New Jersey", "aff_domain": "princeton.edu; ; ; ", "email": "princeton.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/kim21h.html", "aff_unique_index": "0;0;0+0;0+0+0", "aff_unique_norm": "Princeton University", "aff_unique_dep": "Princeton Neuroscience Institute", "aff_unique_url": "https://www.princeton.edu", "aff_unique_abbr": "Princeton", "aff_campus_unique_index": "0;0;0+0;0+0+0", "aff_campus_unique": "Princeton", "aff_country_unique_index": "0;0;0+0;0+0+0", "aff_country_unique": "United States" }, { "title": "Inferring serial correlation with dynamic backgrounds", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8639", "id": "8639", "proceeding": "http://proceedings.mlr.press/v139/wei21b.html", "slides": "", "author_site": "Song Wei, Yao Xie, Dobromir Rahnev", "author": "Song Wei; Yao Xie; Dobromir Rahnev", "abstract": "Sequential data with serial correlation and an unknown, unstructured, 
and dynamic background is ubiquitous in neuroscience, psychology, and econometrics. Inferring serial correlation for such data is a fundamental challenge in statistics. We propose a Total Variation (TV) constrained least square estimator coupled with hypothesis tests to infer the serial correlation in the presence of unknown and unstructured dynamic background. The TV constraint on the dynamic background encourages a piecewise constant structure, which can approximate a wide range of dynamic backgrounds. The tuning parameter is selected via the Ljung-Box test to control the bias-variance trade-off. We establish a non-asymptotic upper bound for the estimation error through variational inequalities. We also derive a lower error bound via Fano\u2019s method and show the proposed method is near-optimal. Numerical simulation and a real study in psychology demonstrate the excellent performance of our proposed method compared with the state-of-the-art.", "bibtex": "@InProceedings{pmlr-v139-wei21b,\n title = \t {Inferring serial correlation with dynamic backgrounds},\n author = {Wei, Song and Xie, Yao and Rahnev, Dobromir},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11047--11057},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wei21b/wei21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/wei21b.html},\n abstract = \t {Sequential data with serial correlation and an unknown, unstructured, and dynamic background is ubiquitous in neuroscience, psychology, and econometrics. Inferring serial correlation for such data is a fundamental challenge in statistics. We propose a Total Variation (TV) constrained least square estimator coupled with hypothesis tests to infer the serial correlation in the presence of unknown and unstructured dynamic background. The TV constraint on the dynamic background encourages a piecewise constant structure, which can approximate a wide range of dynamic backgrounds. The tuning parameter is selected via the Ljung-Box test to control the bias-variance trade-off. We establish a non-asymptotic upper bound for the estimation error through variational inequalities. We also derive a lower error bound via Fano\u2019s method and show the proposed method is near-optimal. 
Numerical simulation and a real study in psychology demonstrate the excellent performance of our proposed method compared with the state-of-the-art.}\n}", "pdf": "http://proceedings.mlr.press/v139/wei21b/wei21b.pdf", "supp": "", "pdf_size": 4383276, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2367808643994249978&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "School of Industrial and Systems Engineering, Georgia Tech; School of Industrial and Systems Engineering, Georgia Tech; School of Psychology, Georgia Tech", "aff_domain": "gatech.edu;isye.gatech.edu; ", "email": "gatech.edu;isye.gatech.edu; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/wei21b.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "Georgia Institute of Technology;Georgia Tech", "aff_unique_dep": "School of Industrial and Systems Engineering;School of Psychology", "aff_unique_url": "https://www.gatech.edu;https://www.gatech.edu", "aff_unique_abbr": "Georgia Tech;GT", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Atlanta", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Infinite-Dimensional Optimization for Zero-Sum Games via Variational Transport", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9863", "id": "9863", "proceeding": "http://proceedings.mlr.press/v139/liu21ac.html", "slides": "", "author_site": "Lewis Liu, Yufeng Zhang, Zhuoran Yang, Reza Babanezhad, Zhaoran Wang", "author": "Lewis Liu; Yufeng Zhang; Zhuoran Yang; Reza Babanezhad; Zhaoran Wang", "abstract": "Game optimization has been extensively studied when decision variables lie in a finite-dimensional space, of which solutions correspond to pure strategies at the Nash equilibrium (NE), and the gradient descent-ascent (GDA) method works widely in practice. In this paper, we consider infinite-dimensional zero-sum games by a min-max distributional optimization problem over a space of probability measures defined on a continuous variable set, which is inspired by finding a mixed NE for finite-dimensional zero-sum games. We then aim to answer the following question: \\textit{Will GDA-type algorithms still be provably efficient when extended to infinite-dimensional zero-sum games?} To answer this question, we propose a particle-based variational transport algorithm based on GDA in the functional spaces. Specifically, the algorithm performs multi-step functional gradient descent-ascent in the Wasserstein space via pushing two sets of particles in the variable space. By characterizing the gradient estimation error from variational form maximization and the convergence behavior of each player with different objective landscapes, we prove rigorously that the generalized GDA algorithm converges to the NE or the value of the game efficiently for a class of games under the Polyak-\u0141{ojasiewicz} (PL) condition. To conclude, we provide complete statistical and convergence guarantees for solving an infinite-dimensional zero-sum game via a provably efficient particle-based method. 
Additionally, our work provides the first thorough statistical analysis for the particle-based algorithm to learn an objective functional with a variational form using universal approximators (\\textit{i.e.}, neural networks (NNs)), which is of independent interest.", "bibtex": "@InProceedings{pmlr-v139-liu21ac,\n title = \t {Infinite-Dimensional Optimization for Zero-Sum Games via Variational Transport},\n author = {Liu, Lewis and Zhang, Yufeng and Yang, Zhuoran and Babanezhad, Reza and Wang, Zhaoran},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7033--7044},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21ac/liu21ac.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21ac.html},\n abstract = \t {Game optimization has been extensively studied when decision variables lie in a finite-dimensional space, of which solutions correspond to pure strategies at the Nash equilibrium (NE), and the gradient descent-ascent (GDA) method works widely in practice. In this paper, we consider infinite-dimensional zero-sum games by a min-max distributional optimization problem over a space of probability measures defined on a continuous variable set, which is inspired by finding a mixed NE for finite-dimensional zero-sum games. We then aim to answer the following question: \\textit{Will GDA-type algorithms still be provably efficient when extended to infinite-dimensional zero-sum games?} To answer this question, we propose a particle-based variational transport algorithm based on GDA in the functional spaces. Specifically, the algorithm performs multi-step functional gradient descent-ascent in the Wasserstein space via pushing two sets of particles in the variable space. By characterizing the gradient estimation error from variational form maximization and the convergence behavior of each player with different objective landscapes, we prove rigorously that the generalized GDA algorithm converges to the NE or the value of the game efficiently for a class of games under the Polyak-\u0141{ojasiewicz} (PL) condition. To conclude, we provide complete statistical and convergence guarantees for solving an infinite-dimensional zero-sum game via a provably efficient particle-based method. 
Additionally, our work provides the first thorough statistical analysis for the particle-based algorithm to learn an objective functional with a variational form using universal approximators (\\textit{i.e.}, neural networks (NNs)), which is of independent interest.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21ac/liu21ac.pdf", "supp": "", "pdf_size": 610113, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2302671048937110131&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Universit\u00e9 de Montr\u00eal, Canada; Northwestern University, United States; Princeton University, United States; Samsung SAIT AI Lab, Canada; Northwestern University, United States", "aff_domain": "gmail.com; ; ; ; ", "email": "gmail.com; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/liu21ac.html", "aff_unique_index": "0;1;2;3;1", "aff_unique_norm": "Universit\u00e9 de Montr\u00e9al;Northwestern University;Princeton University;Samsung", "aff_unique_dep": ";;;AI Lab", "aff_unique_url": "https://www.umontreal.ca;https://www.northwestern.edu;https://www.princeton.edu;https://www.sait.samsung.com", "aff_unique_abbr": "UdeM;NU;Princeton;SAIT", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;1;0;1", "aff_country_unique": "Canada;United States" }, { "title": "Information Obfuscation of Graph Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8413", "id": "8413", "proceeding": "http://proceedings.mlr.press/v139/liao21a.html", "slides": "", "author_site": "Peiyuan Liao, Han Zhao, Keyulu Xu, Tommi Jaakkola, Geoff Gordon, Stefanie Jegelka, Ruslan Salakhutdinov", "author": "Peiyuan Liao; Han Zhao; Keyulu Xu; Tommi Jaakkola; Geoffrey J. Gordon; Stefanie Jegelka; Ruslan Salakhutdinov", "abstract": "While the advent of Graph Neural Networks (GNNs) has greatly improved node and graph representation learning in many applications, the neighborhood aggregation scheme exposes additional vulnerabilities to adversaries seeking to extract node-level information about sensitive attributes. In this paper, we study the problem of protecting sensitive attributes by information obfuscation when learning with graph structured data. We propose a framework to locally filter out pre-determined sensitive attributes via adversarial training with the total variation and the Wasserstein distance. Our method creates a strong defense against inference attacks, while only suffering small loss in task performance. Theoretically, we analyze the effectiveness of our framework against a worst-case adversary, and characterize an inherent trade-off between maximizing predictive accuracy and minimizing information leakage. Experiments across multiple datasets from recommender systems, knowledge graphs and quantum chemistry demonstrate that the proposed approach provides a robust defense across various graph structures and tasks, while producing competitive GNN encoders for downstream tasks.", "bibtex": "@InProceedings{pmlr-v139-liao21a,\n title = \t {Information Obfuscation of Graph Neural Networks},\n author = {Liao, Peiyuan and Zhao, Han and Xu, Keyulu and Jaakkola, Tommi and Gordon, Geoffrey J. 
and Jegelka, Stefanie and Salakhutdinov, Ruslan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6600--6610},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liao21a/liao21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/liao21a.html},\n abstract = \t {While the advent of Graph Neural Networks (GNNs) has greatly improved node and graph representation learning in many applications, the neighborhood aggregation scheme exposes additional vulnerabilities to adversaries seeking to extract node-level information about sensitive attributes. In this paper, we study the problem of protecting sensitive attributes by information obfuscation when learning with graph structured data. We propose a framework to locally filter out pre-determined sensitive attributes via adversarial training with the total variation and the Wasserstein distance. Our method creates a strong defense against inference attacks, while only suffering small loss in task performance. Theoretically, we analyze the effectiveness of our framework against a worst-case adversary, and characterize an inherent trade-off between maximizing predictive accuracy and minimizing information leakage. Experiments across multiple datasets from recommender systems, knowledge graphs and quantum chemistry demonstrate that the proposed approach provides a robust defense across various graph structures and tasks, while producing competitive GNN encoders for downstream tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/liao21a/liao21a.pdf", "supp": "", "pdf_size": 2227378, "gs_citation": 51, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17996715912972296815&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Carnegie Mellon University; University of Illinois at Urbana-Champaign; Massachusetts Institute of Technology (MIT); Massachusetts Institute of Technology (MIT); Carnegie Mellon University; Massachusetts Institute of Technology (MIT); Carnegie Mellon University", "aff_domain": "andrew.cmu.edu;illinois.edu;mit.edu; ; ; ; ", "email": "andrew.cmu.edu;illinois.edu;mit.edu; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/liao21a.html", "aff_unique_index": "0;1;2;2;0;2;0", "aff_unique_norm": "Carnegie Mellon University;University of Illinois Urbana-Champaign;Massachusetts Institute of Technology", "aff_unique_dep": ";;", "aff_unique_url": "https://www.cmu.edu;https://illinois.edu;https://web.mit.edu", "aff_unique_abbr": "CMU;UIUC;MIT", "aff_campus_unique_index": "1", "aff_campus_unique": ";Urbana-Champaign", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Instabilities of Offline RL with Pre-Trained Neural Representation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9197", "id": "9197", "proceeding": "http://proceedings.mlr.press/v139/wang21z.html", "slides": "", "author_site": "Ruosong Wang, Yifan Wu, Ruslan Salakhutdinov, Sham Kakade", "author": "Ruosong Wang; Yifan Wu; Ruslan Salakhutdinov; Sham Kakade", "abstract": "In offline reinforcement learning (RL), we seek to utilize offline data to evaluate (or learn) policies in scenarios where the data are collected from a distribution that substantially differs from that of the target 
policy to be evaluated. Recent theoretical advances have shown that such sample-efficient offline RL is indeed possible provided certain strong representational conditions hold, else there are lower bounds exhibiting exponential error amplification (in the problem horizon) unless the data collection distribution has only a mild distribution shift relative to the target policy. This work studies these issues from an empirical perspective to gauge how stable offline RL methods are. In particular, our methodology explores these ideas when using features from pre-trained neural networks, in the hope that these representations are powerful enough to permit sample efficient offline RL. Through extensive experiments on a range of tasks, we see that substantial error amplification does occur even when using such pre-trained representations (trained on the same task itself); we find offline RL is stable only under extremely mild distribution shift. The implications of these results, both from a theoretical and an empirical perspective, are that successful offline RL (where we seek to go beyond the low distribution shift regime) requires substantially stronger conditions beyond those which suffice for successful supervised learning.", "bibtex": "@InProceedings{pmlr-v139-wang21z,\n title = \t {Instabilities of Offline RL with Pre-Trained Neural Representation},\n author = {Wang, Ruosong and Wu, Yifan and Salakhutdinov, Ruslan and Kakade, Sham},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10948--10960},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21z/wang21z.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21z.html},\n abstract = \t {In offline reinforcement learning (RL), we seek to utilize offline data to evaluate (or learn) policies in scenarios where the data are collected from a distribution that substantially differs from that of the target policy to be evaluated. Recent theoretical advances have shown that such sample-efficient offline RL is indeed possible provided certain strong representational conditions hold, else there are lower bounds exhibiting exponential error amplification (in the problem horizon) unless the data collection distribution has only a mild distribution shift relative to the target policy. This work studies these issues from an empirical perspective to gauge how stable offline RL methods are. In particular, our methodology explores these ideas when using features from pre-trained neural networks, in the hope that these representations are powerful enough to permit sample efficient offline RL. Through extensive experiments on a range of tasks, we see that substantial error amplification does occur even when using such pre-trained representations (trained on the same task itself); we find offline RL is stable only under extremely mild distribution shift. 
The implications of these results, both from a theoretical and an empirical perspective, are that successful offline RL (where we seek to go beyond the low distribution shift regime) requires substantially stronger conditions beyond those which suffice for successful supervised learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21z/wang21z.pdf", "supp": "", "pdf_size": 1915334, "gs_citation": 56, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9327276537971862735&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Carnegie Mellon University; Carnegie Mellon University; Carnegie Mellon University + Microsoft Research + University of Washington; Microsoft Research + University of Washington", "aff_domain": "andrew.cmu.edu; ; ; ", "email": "andrew.cmu.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/wang21z.html", "aff_unique_index": "0;0;0+1+2;1+2", "aff_unique_norm": "Carnegie Mellon University;Microsoft;University of Washington", "aff_unique_dep": ";Microsoft Research;", "aff_unique_url": "https://www.cmu.edu;https://www.microsoft.com/en-us/research;https://www.washington.edu", "aff_unique_abbr": "CMU;MSR;UW", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0+0+0;0+0", "aff_country_unique": "United States" }, { "title": "Instance Specific Approximations for Submodular Maximization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9073", "id": "9073", "proceeding": "http://proceedings.mlr.press/v139/balkanski21a.html", "slides": "/media/icml-2021/Slides/9073.pdf", "author_site": "Eric Balkanski, Sharon Qian, Yaron Singer", "author": "Eric Balkanski; Sharon Qian; Yaron Singer", "abstract": "The predominant measure for the performance of an algorithm is its worst-case approximation guarantee. While worst-case approximations give desirable robustness guarantees, they can differ significantly from the performance of an algorithm in practice. For the problem of monotone submodular maximization under a cardinality constraint, the greedy algorithm is known to obtain a 1-1/e approximation guarantee, which is optimal for a polynomial-time algorithm. However, very little is known about the approximation achieved by greedy and other submodular maximization algorithms on real instances. We develop an algorithm that gives an instance-specific approximation for any solution of an instance of monotone submodular maximization under a cardinality constraint. This algorithm uses a novel dual approach to submodular maximization. In particular, it relies on the construction of a lower bound to the dual objective that can also be exactly minimized. 
We use this algorithm to show that on a wide variety of real-world datasets and objectives, greedy and other algorithms find solutions that approximate the optimal solution significantly better than the 1-1/e \u2248 0.63 worst-case approximation guarantee, often exceeding 0.9.", "bibtex": "@InProceedings{pmlr-v139-balkanski21a,\n title = \t {Instance Specific Approximations for Submodular Maximization},\n author = {Balkanski, Eric and Qian, Sharon and Singer, Yaron},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {609--618},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/balkanski21a/balkanski21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/balkanski21a.html},\n abstract = \t {The predominant measure for the performance of an algorithm is its worst-case approximation guarantee. While worst-case approximations give desirable robustness guarantees, they can differ significantly from the performance of an algorithm in practice. For the problem of monotone submodular maximization under a cardinality constraint, the greedy algorithm is known to obtain a 1-1/e approximation guarantee, which is optimal for a polynomial-time algorithm. However, very little is known about the approximation achieved by greedy and other submodular maximization algorithms on real instances. We develop an algorithm that gives an instance-specific approximation for any solution of an instance of monotone submodular maximization under a cardinality constraint. This algorithm uses a novel dual approach to submodular maximization. In particular, it relies on the construction of a lower bound to the dual objective that can also be exactly minimized. 
We use this algorithm to show that on a wide variety of real-world datasets and objectives, greedy and other algorithms find solutions that approximate the optimal solution significantly better than the 1-1/e \u2248 0.63 worst-case approximation guarantee, often exceeding 0.9.}\n}", "pdf": "http://proceedings.mlr.press/v139/balkanski21a/balkanski21a.pdf", "supp": "", "pdf_size": 2388498, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3214972078495091009&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Columbia University, New York, NY, USA; Harvard University, Cambridge, MA, USA; Harvard University, Cambridge, MA, USA", "aff_domain": "columbia.edu; ; ", "email": "columbia.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/balkanski21a.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "Columbia University;Harvard University", "aff_unique_dep": ";", "aff_unique_url": "https://www.columbia.edu;https://www.harvard.edu", "aff_unique_abbr": "Columbia;Harvard", "aff_campus_unique_index": "0;1;1", "aff_campus_unique": "New York;Cambridge", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Instance-Optimal Compressed Sensing via Posterior Sampling", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8875", "id": "8875", "proceeding": "http://proceedings.mlr.press/v139/jalal21a.html", "slides": "/media/icml-2021/Slides/8875.pdf", "author_site": "Ajil Jalal, Sushrut Karmalkar, Alexandros Dimakis, Eric Price", "author": "Ajil Jalal; Sushrut Karmalkar; Alex Dimakis; Eric Price", "abstract": "We characterize the measurement complexity of compressed sensing of signals drawn from a known prior distribution, even when the support of the prior is the entire space (rather than, say, sparse vectors). We show for Gaussian measurements and \\emph{any} prior distribution on the signal, that the posterior sampling estimator achieves near-optimal recovery guarantees. 
Moreover, this result is robust to model mismatch, as long as the distribution estimate (e.g., from an invertible generative model) is close to the true distribution in Wasserstein distance. We implement the posterior sampling estimator for deep generative priors using Langevin dynamics, and empirically find that it produces accurate estimates with more diversity than MAP.}\n}", "pdf": "http://proceedings.mlr.press/v139/jalal21a/jalal21a.pdf", "supp": "", "pdf_size": 10284935, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13669430670080066426&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "University of Texas at Austin, Department of Electrical and Computer Engineering; University of Texas at Austin, Department of Computer Science; University of Texas at Austin, Department of Electrical and Computer Engineering; University of Texas at Austin, Department of Computer Science", "aff_domain": "utexas.edu;cs.utexas.edu;austin.utexas.edu;cs.utexas.edu", "email": "utexas.edu;cs.utexas.edu;austin.utexas.edu;cs.utexas.edu", "github": "https://github.com/ajiljalal/code-cs-fairness", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/jalal21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Texas at Austin", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.utexas.edu", "aff_unique_abbr": "UT Austin", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Austin", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Integer Programming for Causal Structure Learning in the Presence of Latent Variables", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10075", "id": "10075", "proceeding": "http://proceedings.mlr.press/v139/chen21c.html", "slides": "/media/icml-2021/Slides/10075.pdf", "author_site": "Rui Chen, Sanjeeb Dash, Tian Gao", "author": "Rui Chen; Sanjeeb Dash; Tian Gao", "abstract": "The problem of finding an ancestral acyclic directed mixed graph (ADMG) that represents the causal relationships between a set of variables is an important area of research on causal inference. Most existing score-based structure learning methods focus on learning directed acyclic graph (DAG) models without latent variables. A number of score-based methods have recently been proposed for the ADMG learning, yet they are heuristic in nature and do not guarantee an optimal solution. We propose a novel exact score-based method that solves an integer programming (IP) formulation and returns a score-maximizing ancestral ADMG for a set of continuous variables that follow a multivariate Gaussian distribution. We generalize the state-of-the-art IP model for DAG learning problems and derive new classes of valid inequalities to formulate an IP model for ADMG learning. 
Empirically, our model can be solved efficiently for medium-sized problems and achieves better accuracy than state-of-the-art score-based methods as well as benchmark constraint-based methods.", "bibtex": "@InProceedings{pmlr-v139-chen21c,\n title = \t {Integer Programming for Causal Structure Learning in the Presence of Latent Variables},\n author = {Chen, Rui and Dash, Sanjeeb and Gao, Tian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1550--1560},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21c/chen21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21c.html},\n abstract = \t {The problem of finding an ancestral acyclic directed mixed graph (ADMG) that represents the causal relationships between a set of variables is an important area of research on causal inference. Most existing score-based structure learning methods focus on learning directed acyclic graph (DAG) models without latent variables. A number of score-based methods have recently been proposed for the ADMG learning, yet they are heuristic in nature and do not guarantee an optimal solution. We propose a novel exact score-based method that solves an integer programming (IP) formulation and returns a score-maximizing ancestral ADMG for a set of continuous variables that follow a multivariate Gaussian distribution. We generalize the state-of-the-art IP model for DAG learning problems and derive new classes of valid inequalities to formulate an IP model for ADMG learning. Empirically, our model can be solved efficiently for medium-sized problems and achieves better accuracy than state-of-the-art score-based methods as well as benchmark constraint-based methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21c/chen21c.pdf", "supp": "", "pdf_size": 308153, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14082497365746391672&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Industrial and Systems Engineering, University of Wisconsin-Madison, Madison, Wisconsin, USA; IBM Research, Yorktown Heights, New York, USA; IBM Research, Yorktown Heights, New York, USA", "aff_domain": "wisc.edu; ; ", "email": "wisc.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/chen21c.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "University of Wisconsin-Madison;IBM", "aff_unique_dep": "Department of Industrial and Systems Engineering;IBM Research", "aff_unique_url": "https://www.wisc.edu;https://www.ibm.com/research", "aff_unique_abbr": "UW-Madison;IBM", "aff_campus_unique_index": "0;1;1", "aff_campus_unique": "Madison;Yorktown Heights", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Integrated Defense for Resilient Graph Matching", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9569", "id": "9569", "proceeding": "http://proceedings.mlr.press/v139/ren21c.html", "slides": "", "author_site": "Jiaxiang Ren, Zijie Zhang, Jiayin Jin, Xin Zhao, Sixing Wu, Yang Zhou, Yelong Shen, Tianshi Che, Ruoming Jin, Dejing Dou", "author": "Jiaxiang Ren; Zijie Zhang; Jiayin Jin; Xin Zhao; Sixing Wu; Yang Zhou; Yelong Shen; Tianshi Che; Ruoming Jin; Dejing Dou", "abstract": "A recent study has shown that graph 
matching models are vulnerable to adversarial manipulation of their input which is intended to cause a mismatching. Nevertheless, there is still a lack of a comprehensive solution for further enhancing the robustness of graph matching against adversarial attacks. In this paper, we identify and study two types of unique topology attacks in graph matching: inter-graph dispersion and intra-graph assembly attacks. We propose an integrated defense model, IDRGM, for resilient graph matching with two novel defense techniques to defend against the above two attacks simultaneously. A detection technique of inscribed simplexes in the hyperspheres consisting of multiple matched nodes is proposed to tackle inter-graph dispersion attacks, in which the distances among the matched nodes in multiple graphs are maximized to form regular simplexes. A node separation method based on phase-type distribution and maximum likelihood estimation is developed to estimate the distribution of perturbed graphs and separate the nodes within the same graphs over a wide space, for defending intra-graph assembly attacks, such that the interference from the similar neighbors of the perturbed nodes is significantly reduced. We evaluate the robustness of our IDRGM model on real datasets against state-of-the-art algorithms.", "bibtex": "@InProceedings{pmlr-v139-ren21c,\n title = \t {Integrated Defense for Resilient Graph Matching},\n author = {Ren, Jiaxiang and Zhang, Zijie and Jin, Jiayin and Zhao, Xin and Wu, Sixing and Zhou, Yang and Shen, Yelong and Che, Tianshi and Jin, Ruoming and Dou, Dejing},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8982--8997},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ren21c/ren21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/ren21c.html},\n abstract = \t {A recent study has shown that graph matching models are vulnerable to adversarial manipulation of their input which is intended to cause a mismatching. Nevertheless, there is still a lack of a comprehensive solution for further enhancing the robustness of graph matching against adversarial attacks. In this paper, we identify and study two types of unique topology attacks in graph matching: inter-graph dispersion and intra-graph assembly attacks. We propose an integrated defense model, IDRGM, for resilient graph matching with two novel defense techniques to defend against the above two attacks simultaneously. A detection technique of inscribed simplexes in the hyperspheres consisting of multiple matched nodes is proposed to tackle inter-graph dispersion attacks, in which the distances among the matched nodes in multiple graphs are maximized to form regular simplexes. A node separation method based on phase-type distribution and maximum likelihood estimation is developed to estimate the distribution of perturbed graphs and separate the nodes within the same graphs over a wide space, for defending intra-graph assembly attacks, such that the interference from the similar neighbors of the perturbed nodes is significantly reduced. 
We evaluate the robustness of our IDRGM model on real datasets against state-of-the-art algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v139/ren21c/ren21c.pdf", "supp": "", "pdf_size": 832307, "gs_citation": 19, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17575056119162492776&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "Auburn University, USA; Auburn University, USA; Auburn University, USA; Auburn University, USA; Peking University, China; Auburn University, USA; Microsoft Dynamics 365 AI, USA; Auburn University, USA; Kent State University, USA; University of Oregon, USA + Baidu Research, China", "aff_domain": "auburn.edu;auburn.edu;auburn.edu;auburn.edu;pku.edu.cn;auburn.edu;microsoft.com;auburn.edu;kent.edu;uoregon.edu", "email": "auburn.edu;auburn.edu;auburn.edu;auburn.edu;pku.edu.cn;auburn.edu;microsoft.com;auburn.edu;kent.edu;uoregon.edu", "github": "", "project": "", "author_num": 10, "oa": "https://proceedings.mlr.press/v139/ren21c.html", "aff_unique_index": "0;0;0;0;1;0;2;0;3;4+5", "aff_unique_norm": "Auburn University;Peking University;Microsoft;Kent State University;University of Oregon;Baidu", "aff_unique_dep": ";;Microsoft Dynamics 365 AI;;;Baidu Research", "aff_unique_url": "https://www.auburn.edu;http://www.pku.edu.cn;https://www.microsoft.com;https://www.kent.edu;https://www.uoregon.edu;https://research.baidu.com", "aff_unique_abbr": "Auburn;Peking U;MSFT;KSU;UO;Baidu", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;1;0;0;0;0;0+1", "aff_country_unique": "United States;China" }, { "title": "Interaction-Grounded Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8937", "id": "8937", "proceeding": "http://proceedings.mlr.press/v139/xie21e.html", "slides": "/media/icml-2021/Slides/8937.pdf", "author_site": "Tengyang Xie, John Langford, Paul Mineiro, Ida Momennejad", "author": "Tengyang Xie; John Langford; Paul Mineiro; Ida Momennejad", "abstract": "Consider a prosthetic arm, learning to adapt to its user\u2019s control signals. We propose \\emph{Interaction-Grounded Learning} for this novel setting, in which a learner\u2019s goal is to interact with the environment with no grounding or explicit reward to optimize its policies. Such a problem evades common RL solutions which require an explicit reward. The learning agent observes a multidimensional \\emph{context vector}, takes an \\emph{action}, and then observes a multidimensional \\emph{feedback vector}. This multidimensional feedback vector has \\emph{no} explicit reward information. In order to succeed, the algorithm must learn how to evaluate the feedback vector to discover a latent reward signal, with which it can ground its policies without supervision. We show that in an Interaction-Grounded Learning setting, with certain natural assumptions, a learner can discover the latent reward and ground its policy for successful interaction. 
We provide theoretical guarantees and a proof-of-concept empirical evaluation to demonstrate the effectiveness of our proposed approach.", "bibtex": "@InProceedings{pmlr-v139-xie21e,\n title = \t {Interaction-Grounded Learning},\n author = {Xie, Tengyang and Langford, John and Mineiro, Paul and Momennejad, Ida},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11414--11423},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xie21e/xie21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/xie21e.html},\n abstract = \t {Consider a prosthetic arm, learning to adapt to its user\u2019s control signals. We propose \\emph{Interaction-Grounded Learning} for this novel setting, in which a learner\u2019s goal is to interact with the environment with no grounding or explicit reward to optimize its policies. Such a problem evades common RL solutions which require an explicit reward. The learning agent observes a multidimensional \\emph{context vector}, takes an \\emph{action}, and then observes a multidimensional \\emph{feedback vector}. This multidimensional feedback vector has \\emph{no} explicit reward information. In order to succeed, the algorithm must learn how to evaluate the feedback vector to discover a latent reward signal, with which it can ground its policies without supervision. We show that in an Interaction-Grounded Learning setting, with certain natural assumptions, a learner can discover the latent reward and ground its policy for successful interaction. We provide theoretical guarantees and a proof-of-concept empirical evaluation to demonstrate the effectiveness of our proposed approach.}\n}", "pdf": "http://proceedings.mlr.press/v139/xie21e/xie21e.pdf", "supp": "", "pdf_size": 1226344, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13612984690164749979&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "University of Illinois at Urbana-Champaign; Microsoft Research, New York City; Microsoft Research, New York City; Microsoft Research, New York City", "aff_domain": "illinois.edu;microsoft.com;microsoft.com;microsoft.com", "email": "illinois.edu;microsoft.com;microsoft.com;microsoft.com", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/xie21e.html", "aff_unique_index": "0;1;1;1", "aff_unique_norm": "University of Illinois Urbana-Champaign;Microsoft", "aff_unique_dep": ";Microsoft Research", "aff_unique_url": "https://illinois.edu;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "UIUC;MSR", "aff_campus_unique_index": "0;1;1;1", "aff_campus_unique": "Urbana-Champaign;New York City", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Interactive Learning from Activity Description", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10497", "id": "10497", "proceeding": "http://proceedings.mlr.press/v139/nguyen21e.html", "slides": "/media/icml-2021/Slides/10497.pdf", "author_site": "Khanh Nguyen, Dipendra Misra, Robert Schapire, Miroslav Dudik, Patrick Shafto", "author": "Khanh X Nguyen; Dipendra Misra; Robert Schapire; Miroslav Dudik; Patrick Shafto", "abstract": "We present a novel interactive learning protocol that enables training request-fulfilling agents by verbally 
describing their activities. Unlike imitation learning (IL), our protocol allows the teaching agent to provide feedback in a language that is most appropriate for them. Compared with reward in reinforcement learning (RL), the description feedback is richer and allows for improved sample complexity. We develop a probabilistic framework and an algorithm that practically implements our protocol. Empirical results in two challenging request-fulfilling problems demonstrate the strengths of our approach: compared with RL baselines, it is more sample-efficient; compared with IL baselines, it achieves competitive success rates without requiring the teaching agent to be able to demonstrate the desired behavior using the learning agent\u2019s actions. Apart from empirical evaluation, we also provide theoretical guarantees for our algorithm under certain assumptions about the teacher and the environment.", "bibtex": "@InProceedings{pmlr-v139-nguyen21e,\n title = \t {Interactive Learning from Activity Description},\n author = {Nguyen, Khanh X and Misra, Dipendra and Schapire, Robert and Dudik, Miroslav and Shafto, Patrick},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8096--8108},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nguyen21e/nguyen21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/nguyen21e.html},\n abstract = \t {We present a novel interactive learning protocol that enables training request-fulfilling agents by verbally describing their activities. Unlike imitation learning (IL), our protocol allows the teaching agent to provide feedback in a language that is most appropriate for them. Compared with reward in reinforcement learning (RL), the description feedback is richer and allows for improved sample complexity. We develop a probabilistic framework and an algorithm that practically implements our protocol. Empirical results in two challenging request-fulfilling problems demonstrate the strengths of our approach: compared with RL baselines, it is more sample-efficient; compared with IL baselines, it achieves competitive success rates without requiring the teaching agent to be able to demonstrate the desired behavior using the learning agent\u2019s actions. 
Apart from empirical evaluation, we also provide theoretical guarantees for our algorithm under certain assumptions about the teacher and the environment.}\n}", "pdf": "http://proceedings.mlr.press/v139/nguyen21e/nguyen21e.pdf", "supp": "", "pdf_size": 686739, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6188595152759271430&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Department of Computer Science, University of Maryland, Maryland, USA; Microsoft Research, New York, USA; Microsoft Research, New York, USA; Microsoft Research, New York, USA; Rutgers University, New Jersey, USA", "aff_domain": "umd.edu; ; ; ; ", "email": "umd.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/nguyen21e.html", "aff_unique_index": "0;1;1;1;2", "aff_unique_norm": "University of Maryland;Microsoft;Rutgers University", "aff_unique_dep": "Department of Computer Science;Microsoft Research;", "aff_unique_url": "https://www.umd.edu;https://www.microsoft.com/en-us/research;https://www.rutgers.edu", "aff_unique_abbr": "UMD;MSR;Rutgers", "aff_campus_unique_index": "0;1;1;1;2", "aff_campus_unique": "Maryland;New York;New Jersey", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Intermediate Layer Optimization for Inverse Problems using Deep Generative Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9231", "id": "9231", "proceeding": "http://proceedings.mlr.press/v139/daras21a.html", "slides": "/media/icml-2021/Slides/9231.pdf", "author_site": "Giannis Daras, Joseph Dean, Ajil Jalal, Alexandros Dimakis", "author": "Giannis Daras; Joseph Dean; Ajil Jalal; Alex Dimakis", "abstract": "We propose Intermediate Layer Optimization (ILO), a novel optimization algorithm for solving inverse problems with deep generative models. Instead of optimizing only over the initial latent code, we progressively change the input layer obtaining successively more expressive generators. To explore the higher dimensional spaces, our method searches for latent codes that lie within a small l1 ball around the manifold induced by the previous layer. Our theoretical analysis shows that by keeping the radius of the ball relatively small, we can improve the established error bound for compressed sensing with deep generative models. We empirically show that our approach outperforms state-of-the-art methods introduced in StyleGAN2 and PULSE for a wide range of inverse problems including inpainting, denoising, super-resolution and compressed sensing.", "bibtex": "@InProceedings{pmlr-v139-daras21a,\n title = \t {Intermediate Layer Optimization for Inverse Problems using Deep Generative Models},\n author = {Daras, Giannis and Dean, Joseph and Jalal, Ajil and Dimakis, Alex},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2421--2432},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/daras21a/daras21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/daras21a.html},\n abstract = \t {We propose Intermediate Layer Optimization (ILO), a novel optimization algorithm for solving inverse problems with deep generative models. 
Instead of optimizing only over the initial latent code, we progressively change the input layer obtaining successively more expressive generators. To explore the higher dimensional spaces, our method searches for latent codes that lie within a small l1 ball around the manifold induced by the previous layer. Our theoretical analysis shows that by keeping the radius of the ball relatively small, we can improve the established error bound for compressed sensing with deep generative models. We empirically show that our approach outperforms state-of-the-art methods introduced in StyleGAN2 and PULSE for a wide range of inverse problems including inpainting, denoising, super-resolution and compressed sensing.}\n}", "pdf": "http://proceedings.mlr.press/v139/daras21a/daras21a.pdf", "supp": "", "pdf_size": 9967570, "gs_citation": 107, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10888680252420581266&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "The University of Texas at Austin; The University of Texas at Austin; The University of Texas at Austin; The University of Texas at Austin", "aff_domain": "utexas.edu;utexas.edu;utexas.edu;austin.utexas.edu", "email": "utexas.edu;utexas.edu;utexas.edu;austin.utexas.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/daras21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Texas at Austin", "aff_unique_dep": "", "aff_unique_url": "https://www.utexas.edu", "aff_unique_abbr": "UT Austin", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Austin", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Interpretable Stability Bounds for Spectral Graph Filters", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10145", "id": "10145", "proceeding": "http://proceedings.mlr.press/v139/kenlay21a.html", "slides": "/media/icml-2021/Slides/10145.pdf", "author_site": "Henry Kenlay, Dorina Thanou, Xiaowen Dong", "author": "Henry Kenlay; Dorina Thanou; Xiaowen Dong", "abstract": "Graph-structured data arise in a variety of real-world context ranging from sensor and transportation to biological and social networks. As a ubiquitous tool to process graph-structured data, spectral graph filters have been used to solve common tasks such as denoising and anomaly detection, as well as design deep learning architectures such as graph neural networks. Despite being an important tool, there is a lack of theoretical understanding of the stability properties of spectral graph filters, which are important for designing robust machine learning models. In this paper, we study filter stability and provide a novel and interpretable upper bound on the change of filter output, where the bound is expressed in terms of the endpoint degrees of the deleted and newly added edges, as well as the spatial proximity of those edges. This upper bound allows us to reason, in terms of structural properties of the graph, when a spectral graph filter will be stable. 
We further perform extensive experiments to verify intuition that can be gained from the bound.", "bibtex": "@InProceedings{pmlr-v139-kenlay21a,\n title = \t {Interpretable Stability Bounds for Spectral Graph Filters},\n author = {Kenlay, Henry and Thanou, Dorina and Dong, Xiaowen},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5388--5397},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kenlay21a/kenlay21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kenlay21a.html},\n abstract = \t {Graph-structured data arise in a variety of real-world context ranging from sensor and transportation to biological and social networks. As a ubiquitous tool to process graph-structured data, spectral graph filters have been used to solve common tasks such as denoising and anomaly detection, as well as design deep learning architectures such as graph neural networks. Despite being an important tool, there is a lack of theoretical understanding of the stability properties of spectral graph filters, which are important for designing robust machine learning models. In this paper, we study filter stability and provide a novel and interpretable upper bound on the change of filter output, where the bound is expressed in terms of the endpoint degrees of the deleted and newly added edges, as well as the spatial proximity of those edges. This upper bound allows us to reason, in terms of structural properties of the graph, when a spectral graph filter will be stable. We further perform extensive experiments to verify intuition that can be gained from the bound.}\n}", "pdf": "http://proceedings.mlr.press/v139/kenlay21a/kenlay21a.pdf", "supp": "", "pdf_size": 686322, "gs_citation": 49, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11114419187878588285&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "University of Oxford; \u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne; University of Oxford", "aff_domain": "robots.ox.ac.uk; ; ", "email": "robots.ox.ac.uk; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/kenlay21a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Oxford;EPFL", "aff_unique_dep": ";", "aff_unique_url": "https://www.ox.ac.uk;https://www.epfl.ch", "aff_unique_abbr": "Oxford;EPFL", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United Kingdom;Switzerland" }, { "title": "Interpretable Stein Goodness-of-fit Tests on Riemannian Manifold", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10699", "id": "10699", "proceeding": "http://proceedings.mlr.press/v139/xu21c.html", "slides": "/media/icml-2021/Slides/10699.pdf", "author_site": "Wenkai Xu, Takeru Matsuda", "author": "Wenkai Xu; Takeru Matsuda", "abstract": "In many applications, we encounter data on Riemannian manifolds such as torus and rotation groups. Standard statistical procedures for multivariate data are not applicable to such data. In this study, we develop goodness-of-fit testing and interpretable model criticism methods for general distributions on Riemannian manifolds, including those with an intractable normalization constant. 
The proposed methods are based on extensions of kernel Stein discrepancy, which are derived from Stein operators on Riemannian manifolds. We discuss the connections between the proposed tests with existing ones and provide a theoretical analysis of their asymptotic Bahadur efficiency. Simulation results and real data applications show the validity and usefulness of the proposed methods.", "bibtex": "@InProceedings{pmlr-v139-xu21c,\n title = \t {Interpretable Stein Goodness-of-fit Tests on Riemannian Manifold},\n author = {Xu, Wenkai and Matsuda, Takeru},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11502--11513},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xu21c/xu21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/xu21c.html},\n abstract = \t {In many applications, we encounter data on Riemannian manifolds such as torus and rotation groups. Standard statistical procedures for multivariate data are not applicable to such data. In this study, we develop goodness-of-fit testing and interpretable model criticism methods for general distributions on Riemannian manifolds, including those with an intractable normalization constant. The proposed methods are based on extensions of kernel Stein discrepancy, which are derived from Stein operators on Riemannian manifolds. We discuss the connections between the proposed tests with existing ones and provide a theoretical analysis of their asymptotic Bahadur efficiency. Simulation results and real data applications show the validity and usefulness of the proposed methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/xu21c/xu21c.pdf", "supp": "", "pdf_size": 4089366, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1640275057378487903&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Gatsby Computational Neuroscience Unit, London, United Kingdom; RIKEN Center for Brain Science, Tokyo, Japan", "aff_domain": "gmail.com;riken.jp", "email": "gmail.com;riken.jp", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/xu21c.html", "aff_unique_index": "1", "aff_unique_norm": ";RIKEN Center for Brain Science", "aff_unique_dep": ";", "aff_unique_url": ";https://www.riken.jp/en/cbs/", "aff_unique_abbr": ";", "aff_campus_unique_index": "1", "aff_campus_unique": ";Tokyo", "aff_country_unique_index": "1", "aff_country_unique": ";Japan" }, { "title": "Interpreting and Disentangling Feature Components of Various Complexity from DNNs", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10221", "id": "10221", "proceeding": "http://proceedings.mlr.press/v139/ren21b.html", "slides": "", "author_site": "Jie Ren, Mingjie Li, Zexu Liu, Quanshi Zhang", "author": "Jie Ren; Mingjie Li; Zexu Liu; Quanshi Zhang", "abstract": "This paper aims to define, visualize, and analyze the feature complexity that is learned by a DNN. We propose a generic definition for the feature complexity. Given the feature of a certain layer in the DNN, our method decomposes and visualizes feature components of different complexity orders from the feature. The feature decomposition enables us to evaluate the reliability, the effectiveness, and the significance of over-fitting of these feature components. 
Furthermore, such analysis helps to improve the performance of DNNs. As a generic method, the feature complexity also provides new insights into existing deep-learning techniques, such as network compression and knowledge distillation.", "bibtex": "@InProceedings{pmlr-v139-ren21b,\n title = \t {Interpreting and Disentangling Feature Components of Various Complexity from DNNs},\n author = {Ren, Jie and Li, Mingjie and Liu, Zexu and Zhang, Quanshi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8971--8981},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ren21b/ren21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/ren21b.html},\n abstract = \t {This paper aims to define, visualize, and analyze the feature complexity that is learned by a DNN. We propose a generic definition for the feature complexity. Given the feature of a certain layer in the DNN, our method decomposes and visualizes feature components of different complexity orders from the feature. The feature decomposition enables us to evaluate the reliability, the effectiveness, and the significance of over-fitting of these feature components. Furthermore, such analysis helps to improve the performance of DNNs. As a generic method, the feature complexity also provides new insights into existing deep-learning techniques, such as network compression and knowledge distillation.}\n}", "pdf": "http://proceedings.mlr.press/v139/ren21b/ren21b.pdf", "supp": "", "pdf_size": 1011428, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1317009935955813260&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; John Hopcroft Center and the MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University", "aff_domain": "sjtu.edu.cn; ; ; ", "email": "sjtu.edu.cn; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/ren21b.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Shanghai Jiao Tong University", "aff_unique_dep": "", "aff_unique_url": "https://www.sjtu.edu.cn", "aff_unique_abbr": "SJTU", "aff_campus_unique_index": "1", "aff_campus_unique": ";Shanghai", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "China" }, { "title": "Inverse Constrained Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9667", "id": "9667", "proceeding": "http://proceedings.mlr.press/v139/malik21a.html", "slides": "/media/icml-2021/Slides/9667.pdf", "author_site": "Shehryar Malik, Usman Anwar, Alireza Aghasi, Ali Ahmed", "author": "Shehryar Malik; Usman Anwar; Alireza Aghasi; Ali Ahmed", "abstract": "In real world settings, numerous constraints are present which are hard to specify mathematically. However, for the real world deployment of reinforcement learning (RL), it is critical that RL agents are aware of these constraints, so that they can act safely. In this work, we consider the problem of learning constraints from demonstrations of a constraint-abiding agent\u2019s behavior. We experimentally validate our approach and show that our framework can successfully learn the most likely constraints that the agent respects. 
We further show that these learned constraints are \\textit{transferable} to new agents that may have different morphologies and/or reward functions. Previous works in this regard have either mainly been restricted to tabular (discrete) settings, specific types of constraints or assume the environment\u2019s transition dynamics. In contrast, our framework is able to learn arbitrary \\textit{Markovian} constraints in high-dimensions in a completely model-free setting. The code is available at: \\url{https://github.com/shehryar-malik/icrl}.", "bibtex": "@InProceedings{pmlr-v139-malik21a,\n title = \t {Inverse Constrained Reinforcement Learning},\n author = {Malik, Shehryar and Anwar, Usman and Aghasi, Alireza and Ahmed, Ali},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7390--7399},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/malik21a/malik21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/malik21a.html},\n abstract = \t {In real world settings, numerous constraints are present which are hard to specify mathematically. However, for the real world deployment of reinforcement learning (RL), it is critical that RL agents are aware of these constraints, so that they can act safely. In this work, we consider the problem of learning constraints from demonstrations of a constraint-abiding agent\u2019s behavior. We experimentally validate our approach and show that our framework can successfully learn the most likely constraints that the agent respects. We further show that these learned constraints are \\textit{transferable} to new agents that may have different morphologies and/or reward functions. Previous works in this regard have either mainly been restricted to tabular (discrete) settings, specific types of constraints or assume the environment\u2019s transition dynamics. In contrast, our framework is able to learn arbitrary \\textit{Markovian} constraints in high-dimensions in a completely model-free setting. 
The code is available at: \\url{https://github.com/shehryar-malik/icrl}.}\n}", "pdf": "http://proceedings.mlr.press/v139/malik21a/malik21a.pdf", "supp": "", "pdf_size": 6664905, "gs_citation": 70, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6882447057123293006&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "Information Technology University, Lahore, Pakistan; Information Technology University, Lahore, Pakistan; Georgia State University, Atlanta, GA, USA; Information Technology University, Lahore, Pakistan", "aff_domain": "itu.edu.pk;itu.edu.pk;gsu.edu;itu.edu.pk", "email": "itu.edu.pk;itu.edu.pk;gsu.edu;itu.edu.pk", "github": "https://github.com/shehryar-malik/icrl", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/malik21a.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "Information Technology University;Georgia State University", "aff_unique_dep": ";", "aff_unique_url": "https://www.itu.edu.pk;https://www.gsu.edu", "aff_unique_abbr": ";GSU", "aff_campus_unique_index": "0;0;1;0", "aff_campus_unique": "Lahore;Atlanta", "aff_country_unique_index": "0;0;1;0", "aff_country_unique": "Pakistan;United States" }, { "title": "Inverse Decision Modeling: Learning Interpretable Representations of Behavior", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9785", "id": "9785", "proceeding": "http://proceedings.mlr.press/v139/jarrett21a.html", "slides": "", "author_site": "Daniel Jarrett, Alihan H\u00fcy\u00fck, Mihaela van der Schaar", "author": "Daniel Jarrett; Alihan H\u00fcy\u00fck; Mihaela Van Der Schaar", "abstract": "Decision analysis deals with modeling and enhancing decision processes. A principal challenge in improving behavior is in obtaining a transparent *description* of existing behavior in the first place. In this paper, we develop an expressive, unifying perspective on *inverse decision modeling*: a framework for learning parameterized representations of sequential decision behavior. First, we formalize the *forward* problem (as a normative standard), subsuming common classes of control behavior. Second, we use this to formalize the *inverse* problem (as a descriptive model), generalizing existing work on imitation/reward learning\u2014while opening up a much broader class of research problems in behavior representation. Finally, we instantiate this approach with an example (*inverse bounded rational control*), illustrating how this structure enables learning (interpretable) representations of (bounded) rationality\u2014while naturally capturing intuitive notions of suboptimal actions, biased beliefs, and imperfect knowledge of environments.", "bibtex": "@InProceedings{pmlr-v139-jarrett21a,\n title = \t {Inverse Decision Modeling: Learning Interpretable Representations of Behavior},\n author = {Jarrett, Daniel and H{\\\"u}y{\\\"u}k, Alihan and Van Der Schaar, Mihaela},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4755--4771},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jarrett21a/jarrett21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jarrett21a.html},\n abstract = \t {Decision analysis deals with modeling and enhancing decision processes. 
A principal challenge in improving behavior is in obtaining a transparent *description* of existing behavior in the first place. In this paper, we develop an expressive, unifying perspective on *inverse decision modeling*: a framework for learning parameterized representations of sequential decision behavior. First, we formalize the *forward* problem (as a normative standard), subsuming common classes of control behavior. Second, we use this to formalize the *inverse* problem (as a descriptive model), generalizing existing work on imitation/reward learning\u2014while opening up a much broader class of research problems in behavior representation. Finally, we instantiate this approach with an example (*inverse bounded rational control*), illustrating how this structure enables learning (interpretable) representations of (bounded) rationality\u2014while naturally capturing intuitive notions of suboptimal actions, biased beliefs, and imperfect knowledge of environments.}\n}", "pdf": "http://proceedings.mlr.press/v139/jarrett21a/jarrett21a.pdf", "supp": "", "pdf_size": 4656977, "gs_citation": 36, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=623499770319383084&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Applied Mathematics and Theoretical Physics, University of Cambridge, UK; Department of Electrical Engineering, University of California, Los Angeles, USA; Department of Applied Mathematics and Theoretical Physics, University of Cambridge, UK + Department of Electrical Engineering, University of California, Los Angeles, USA", "aff_domain": "maths.cam.ac.uk; ;ee.ucla.edu", "email": "maths.cam.ac.uk; ;ee.ucla.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/jarrett21a.html", "aff_unique_index": "0;1;0+1", "aff_unique_norm": "University of Cambridge;University of California, Los Angeles", "aff_unique_dep": "Department of Applied Mathematics and Theoretical Physics;Department of Electrical Engineering", "aff_unique_url": "https://www.cam.ac.uk;https://www.ucla.edu", "aff_unique_abbr": "Cambridge;UCLA", "aff_campus_unique_index": "0;1;0+1", "aff_campus_unique": "Cambridge;Los Angeles", "aff_country_unique_index": "0;1;0+1", "aff_country_unique": "United Kingdom;United States" }, { "title": "Is Pessimism Provably Efficient for Offline RL?", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10409", "id": "10409", "proceeding": "http://proceedings.mlr.press/v139/jin21e.html", "slides": "/media/icml-2021/Slides/10409.pdf", "author_site": "Ying Jin, Zhuoran Yang, Zhaoran Wang", "author": "Ying Jin; Zhuoran Yang; Zhaoran Wang", "abstract": "We study offline reinforcement learning (RL), which aims to learn an optimal policy based on a dataset collected a priori. Due to the lack of further interactions with the environment, offline RL suffers from the insufficient coverage of the dataset, which eludes most existing theoretical analysis. In this paper, we propose a pessimistic variant of the value iteration algorithm (PEVI), which incorporates an uncertainty quantifier as the penalty function. Such a penalty function simply flips the sign of the bonus function for promoting exploration in online RL, which makes it easily implementable and compatible with general function approximators. Without assuming the sufficient coverage of the dataset, we establish a data-dependent upper bound on the suboptimality of PEVI for general Markov decision processes (MDPs). 
When specialized to linear MDPs, it matches the information-theoretic lower bound up to multiplicative factors of the dimension and horizon. In other words, pessimism is not only provably efficient but also minimax optimal. In particular, given the dataset, the learned policy serves as the \u201cbest effort\u201d among all policies, as no other policies can do better. Our theoretical analysis identifies the critical role of pessimism in eliminating a notion of spurious correlation, which emerges from the \u201cirrelevant\u201d trajectories that are less covered by the dataset and not informative for the optimal policy.", "bibtex": "@InProceedings{pmlr-v139-jin21e,\n title = \t {Is Pessimism Provably Efficient for Offline RL?},\n author = {Jin, Ying and Yang, Zhuoran and Wang, Zhaoran},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5084--5096},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jin21e/jin21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/jin21e.html},\n abstract = \t {We study offline reinforcement learning (RL), which aims to learn an optimal policy based on a dataset collected a priori. Due to the lack of further interactions with the environment, offline RL suffers from the insufficient coverage of the dataset, which eludes most existing theoretical analysis. In this paper, we propose a pessimistic variant of the value iteration algorithm (PEVI), which incorporates an uncertainty quantifier as the penalty function. Such a penalty function simply flips the sign of the bonus function for promoting exploration in online RL, which makes it easily implementable and compatible with general function approximators. Without assuming the sufficient coverage of the dataset, we establish a data-dependent upper bound on the suboptimality of PEVI for general Markov decision processes (MDPs). When specialized to linear MDPs, it matches the information-theoretic lower bound up to multiplicative factors of the dimension and horizon. In other words, pessimism is not only provably efficient but also minimax optimal. In particular, given the dataset, the learned policy serves as the \u201cbest effort\u201d among all policies, as no other policies can do better. 
Our theoretical analysis identifies the critical role of pessimism in eliminating a notion of spurious correlation, which emerges from the \u201cirrelevant\u201d trajectories that are less covered by the dataset and not informative for the optimal policy.}\n}", "pdf": "http://proceedings.mlr.press/v139/jin21e/jin21e.pdf", "supp": "", "pdf_size": 909082, "gs_citation": 499, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8280846013951370811&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Statistics, Stanford University; Department of Operations Research and Financial Engineering, Princeton University; Department of Industrial Engineering and Management Sciences, Northwestern University", "aff_domain": "gmail.com; ; ", "email": "gmail.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/jin21e.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Stanford University;Princeton University;Northwestern University", "aff_unique_dep": "Department of Statistics;Department of Operations Research and Financial Engineering;Department of Industrial Engineering and Management Sciences", "aff_unique_url": "https://www.stanford.edu;https://www.princeton.edu;https://www.northwestern.edu", "aff_unique_abbr": "Stanford;Princeton;NU", "aff_campus_unique_index": "0", "aff_campus_unique": "Stanford;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Is Space-Time Attention All You Need for Video Understanding?", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8941", "id": "8941", "proceeding": "http://proceedings.mlr.press/v139/bertasius21a.html", "slides": "", "author_site": "Gedas Bertasius, Heng Wang, Lorenzo Torresani", "author": "Gedas Bertasius; Heng Wang; Lorenzo Torresani", "abstract": "We present a convolution-free approach to video classification built exclusively on self-attention over space and time. Our method, named \u201cTimeSformer,\u201d adapts the standard Transformer architecture to video by enabling spatiotemporal feature learning directly from a sequence of frame-level patches. Our experimental study compares different self-attention schemes and suggests that \u201cdivided attention,\u201d where temporal attention and spatial attention are separately applied within each block, leads to the best video classification accuracy among the design choices considered. Despite the radically new design, TimeSformer achieves state-of-the-art results on several action recognition benchmarks, including the best reported accuracy on Kinetics-400 and Kinetics-600. Finally, compared to 3D convolutional networks, our model is faster to train, it can achieve dramatically higher test efficiency (at a small drop in accuracy), and it can also be applied to much longer video clips (over one minute long). 
Code and models are available at: https://github.com/facebookresearch/TimeSformer.", "bibtex": "@InProceedings{pmlr-v139-bertasius21a,\n title = \t {Is Space-Time Attention All You Need for Video Understanding?},\n author = {Bertasius, Gedas and Wang, Heng and Torresani, Lorenzo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {813--824},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bertasius21a/bertasius21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bertasius21a.html},\n abstract = \t {We present a convolution-free approach to video classification built exclusively on self-attention over space and time. Our method, named \u201cTimeSformer,\u201d adapts the standard Transformer architecture to video by enabling spatiotemporal feature learning directly from a sequence of frame-level patches. Our experimental study compares different self-attention schemes and suggests that \u201cdivided attention,\u201d where temporal attention and spatial attention are separately applied within each block, leads to the best video classification accuracy among the design choices considered. Despite the radically new design, TimeSformer achieves state-of-the-art results on several action recognition benchmarks, including the best reported accuracy on Kinetics-400 and Kinetics-600. Finally, compared to 3D convolutional networks, our model is faster to train, it can achieve dramatically higher test efficiency (at a small drop in accuracy), and it can also be applied to much longer video clips (over one minute long). Code and models are available at: https://github.com/facebookresearch/TimeSformer.}\n}", "pdf": "http://proceedings.mlr.press/v139/bertasius21a/bertasius21a.pdf", "supp": "", "pdf_size": 8570611, "gs_citation": 2678, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6828425192739736056&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Facebook AI; Facebook AI; Dartmouth College", "aff_domain": "seas.upenn.edu; ; ", "email": "seas.upenn.edu; ; ", "github": "https://github.com/facebookresearch/TimeSformer", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/bertasius21a.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "Meta;Dartmouth College", "aff_unique_dep": "Facebook AI;", "aff_unique_url": "https://www.facebook.com;https://www.dartmouth.edu", "aff_unique_abbr": "Facebook AI;Dartmouth", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Isometric Gaussian Process Latent Variable Model for Dissimilarity Data", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9023", "id": "9023", "proceeding": "http://proceedings.mlr.press/v139/jorgensen21a.html", "slides": "", "author_site": "Martin J\u00f8rgensen, S\u00f8ren Hauberg", "author": "Martin J\u00f8rgensen; Soren Hauberg", "abstract": "We present a probabilistic model where the latent variable respects both the distances and the topology of the modeled data. The model leverages the Riemannian geometry of the generated manifold to endow the latent space with a well-defined stochastic distance measure, which is modeled locally as Nakagami distributions. 
These stochastic distances are sought to be as similar as possible to observed distances along a neighborhood graph through a censoring process. The model is inferred by variational inference based on observations of pairwise distances. We demonstrate how the new model can encode invariances in the learned manifolds.", "bibtex": "@InProceedings{pmlr-v139-jorgensen21a,\n title = \t {Isometric Gaussian Process Latent Variable Model for Dissimilarity Data},\n author = {J{\\o}rgensen, Martin and Hauberg, Soren},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5127--5136},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jorgensen21a/jorgensen21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jorgensen21a.html},\n abstract = \t {We present a probabilistic model where the latent variable respects both the distances and the topology of the modeled data. The model leverages the Riemannian geometry of the generated manifold to endow the latent space with a well-defined stochastic distance measure, which is modeled locally as Nakagami distributions. These stochastic distances are sought to be as similar as possible to observed distances along a neighborhood graph through a censoring process. The model is inferred by variational inference based on observations of pairwise distances. We demonstrate how the new model can encode invariances in the learned manifolds.}\n}", "pdf": "http://proceedings.mlr.press/v139/jorgensen21a/jorgensen21a.pdf", "supp": "", "pdf_size": 4193909, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=893446893934210907&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Engineering Science, University of Oxford; Department of Mathematics and Computer Science, Technical University of Denmark", "aff_domain": "robots.ox.ac.uk;dtu.dk", "email": "robots.ox.ac.uk;dtu.dk", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/jorgensen21a.html", "aff_unique_index": "0;1", "aff_unique_norm": "University of Oxford;Technical University of Denmark", "aff_unique_dep": "Department of Engineering Science;Department of Mathematics and Computer Science", "aff_unique_url": "https://www.ox.ac.uk;https://www.dtu.dk", "aff_unique_abbr": "Oxford;DTU", "aff_campus_unique_index": "0", "aff_campus_unique": "Oxford;", "aff_country_unique_index": "0;1", "aff_country_unique": "United Kingdom;Denmark" }, { "title": "Joining datasets via data augmentation in the label space for neural networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10343", "id": "10343", "proceeding": "http://proceedings.mlr.press/v139/zhao21b.html", "slides": "", "author_site": "Junbo Zhao, Mingfeng Ou, linji Xue, Yunkai Cui, Sai Wu, Gang Chen", "author": "Junbo Zhao; Mingfeng Ou; Linji Xue; Yunkai Cui; Sai Wu; Gang Chen", "abstract": "Most, if not all, modern deep learning systems restrict themselves to a single dataset for neural network training and inference. In this article, we are interested in systematic ways to join datasets that are made of similar purposes. 
Unlike previous published works that ubiquitously conduct the dataset joining in the uninterpretable latent vectorial space, the core to our method is an augmentation procedure in the label space. The primary challenge to address the label space for dataset joining is the discrepancy between labels: non-overlapping label annotation sets, different labeling granularity or hierarchy and etc. Notably we propose a new technique leveraging artificially created knowledge graph, recurrent neural networks and policy gradient that successfully achieve the dataset joining in the label space. Empirical results on both image and text classification justify the validity of our approach.", "bibtex": "@InProceedings{pmlr-v139-zhao21b,\n title = \t {Joining datasets via data augmentation in the label space for neural networks},\n author = {Zhao, Junbo and Ou, Mingfeng and Xue, Linji and Cui, Yunkai and Wu, Sai and Chen, Gang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12686--12696},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhao21b/zhao21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhao21b.html},\n abstract = \t {Most, if not all, modern deep learning systems restrict themselves to a single dataset for neural network training and inference. In this article, we are interested in systematic ways to join datasets that are made of similar purposes. Unlike previous published works that ubiquitously conduct the dataset joining in the uninterpretable latent vectorial space, the core to our method is an augmentation procedure in the label space. The primary challenge to address the label space for dataset joining is the discrepancy between labels: non-overlapping label annotation sets, different labeling granularity or hierarchy and etc. Notably we propose a new technique leveraging artificially created knowledge graph, recurrent neural networks and policy gradient that successfully achieve the dataset joining in the label space. Empirical results on both image and text classification justify the validity of our approach.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhao21b/zhao21b.pdf", "supp": "", "pdf_size": 2081095, "gs_citation": 1, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7765982281490412895&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "College of Computer Science and Technology, Zhejiang University; Graviti Inc. 
+ Department of Software Engineering, Tongji University; Graviti Inc.; Graviti Inc.; College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University", "aff_domain": "zju.edu.cn;graviti.com;graviti.com;graviti.com;zju.edu.cn;zju.edu.cn", "email": "zju.edu.cn;graviti.com;graviti.com;graviti.com;zju.edu.cn;zju.edu.cn", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/zhao21b.html", "aff_unique_index": "0;1+2;1;1;0;0", "aff_unique_norm": "Zhejiang University;Graviti Inc.;Tongji University", "aff_unique_dep": "College of Computer Science and Technology;;Department of Software Engineering", "aff_unique_url": "http://www.zju.edu.cn;https://www.graviti.com;https://www.tongji.edu.cn", "aff_unique_abbr": "ZJU;;Tongji", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1+0;1;1;0;0", "aff_country_unique": "China;United States" }, { "title": "Joint Online Learning and Decision-making via Dual Mirror Descent", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9831", "id": "9831", "proceeding": "http://proceedings.mlr.press/v139/lobos21a.html", "slides": "/media/icml-2021/Slides/9831.pdf", "author_site": "Alfonso Lobos Ruiz, Paul Grigas, Zheng Wen", "author": "Alfonso Lobos; Paul Grigas; Zheng Wen", "abstract": "We consider an online revenue maximization problem over a finite time horizon subject to lower and upper bounds on cost. At each period, an agent receives a context vector sampled i.i.d. from an unknown distribution and needs to make a decision adaptively. The revenue and cost functions depend on the context vector as well as some fixed but possibly unknown parameter vector to be learned. We propose a novel offline benchmark and a new algorithm that mixes an online dual mirror descent scheme with a generic parameter learning process. When the parameter vector is known, we demonstrate an $O(\\sqrt{T})$ regret result as well an $O(\\sqrt{T})$ bound on the possible constraint violations. When the parameter is not known and must be learned, we demonstrate that the regret and constraint violations are the sums of the previous $O(\\sqrt{T})$ terms plus terms that directly depend on the convergence of the learning process.", "bibtex": "@InProceedings{pmlr-v139-lobos21a,\n title = \t {Joint Online Learning and Decision-making via Dual Mirror Descent},\n author = {Lobos, Alfonso and Grigas, Paul and Wen, Zheng},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7080--7089},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lobos21a/lobos21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lobos21a.html},\n abstract = \t {We consider an online revenue maximization problem over a finite time horizon subject to lower and upper bounds on cost. At each period, an agent receives a context vector sampled i.i.d. from an unknown distribution and needs to make a decision adaptively. The revenue and cost functions depend on the context vector as well as some fixed but possibly unknown parameter vector to be learned. We propose a novel offline benchmark and a new algorithm that mixes an online dual mirror descent scheme with a generic parameter learning process. 
When the parameter vector is known, we demonstrate an $O(\\sqrt{T})$ regret result as well an $O(\\sqrt{T})$ bound on the possible constraint violations. When the parameter is not known and must be learned, we demonstrate that the regret and constraint violations are the sums of the previous $O(\\sqrt{T})$ terms plus terms that directly depend on the convergence of the learning process.}\n}", "pdf": "http://proceedings.mlr.press/v139/lobos21a/lobos21a.pdf", "supp": "", "pdf_size": 333866, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8484781318073147264&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "University of California, Berkeley; University of California, Berkeley; Google DeepMind, Mountain View, California", "aff_domain": "berkeley.edu;berkeley.edu;google.com", "email": "berkeley.edu;berkeley.edu;google.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/lobos21a.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of California, Berkeley;Google", "aff_unique_dep": ";Google DeepMind", "aff_unique_url": "https://www.berkeley.edu;https://deepmind.com", "aff_unique_abbr": "UC Berkeley;DeepMind", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Berkeley;Mountain View", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Just How Toxic is Data Poisoning? A Unified Benchmark for Backdoor and Data Poisoning Attacks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8473", "id": "8473", "proceeding": "http://proceedings.mlr.press/v139/schwarzschild21a.html", "slides": "", "author_site": "Avi Schwarzschild, Micah Goldblum, Arjun Gupta, John P Dickerson, Tom Goldstein", "author": "Avi Schwarzschild; Micah Goldblum; Arjun Gupta; John P Dickerson; Tom Goldstein", "abstract": "Data poisoning and backdoor attacks manipulate training data in order to cause models to fail during inference. A recent survey of industry practitioners found that data poisoning is the number one concern among threats ranging from model stealing to adversarial attacks. However, it remains unclear exactly how dangerous poisoning methods are and which ones are more effective considering that these methods, even ones with identical objectives, have not been tested in consistent or realistic settings. We observe that data poisoning and backdoor attacks are highly sensitive to variations in the testing setup. Moreover, we find that existing methods may not generalize to realistic settings. While these existing works serve as valuable prototypes for data poisoning, we apply rigorous tests to determine the extent to which we should fear them. In order to promote fair comparison in future work, we develop standardized benchmarks for data poisoning and backdoor attacks.", "bibtex": "@InProceedings{pmlr-v139-schwarzschild21a,\n title = \t {Just How Toxic is Data Poisoning? 
A Unified Benchmark for Backdoor and Data Poisoning Attacks},\n author = {Schwarzschild, Avi and Goldblum, Micah and Gupta, Arjun and Dickerson, John P and Goldstein, Tom},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9389--9398},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/schwarzschild21a/schwarzschild21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/schwarzschild21a.html},\n abstract = \t {Data poisoning and backdoor attacks manipulate training data in order to cause models to fail during inference. A recent survey of industry practitioners found that data poisoning is the number one concern among threats ranging from model stealing to adversarial attacks. However, it remains unclear exactly how dangerous poisoning methods are and which ones are more effective considering that these methods, even ones with identical objectives, have not been tested in consistent or realistic settings. We observe that data poisoning and backdoor attacks are highly sensitive to variations in the testing setup. Moreover, we find that existing methods may not generalize to realistic settings. While these existing works serve as valuable prototypes for data poisoning, we apply rigorous tests to determine the extent to which we should fear them. In order to promote fair comparison in future work, we develop standardized benchmarks for data poisoning and backdoor attacks.}\n}", "pdf": "http://proceedings.mlr.press/v139/schwarzschild21a/schwarzschild21a.pdf", "supp": "", "pdf_size": 541676, "gs_citation": 214, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15855049854905847899&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Mathematics; Department of Computer Science; Department of Robotics; Department of Computer Science; Department of Computer Science", "aff_domain": "umd.edu; ; ; ; ", "email": "umd.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/schwarzschild21a.html", "aff_unique_index": "0;1;2;1;1", "aff_unique_norm": "Mathematics Department;Unknown Institution;Robotics Department", "aff_unique_dep": "Department of Mathematics;Department of Computer Science;Department of Robotics", "aff_unique_url": ";;", "aff_unique_abbr": ";;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "", "aff_country_unique": "" }, { "title": "Just Train Twice: Improving Group Robustness without Training Group Information", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9147", "id": "9147", "proceeding": "http://proceedings.mlr.press/v139/liu21f.html", "slides": "/media/icml-2021/Slides/9147.pdf", "author_site": "Evan Liu, Behzad Haghgoo, Annie Chen, Aditi Raghunathan, Pang Wei Koh, Shiori Sagawa, Percy Liang, Chelsea Finn", "author": "Evan Z Liu; Behzad Haghgoo; Annie S Chen; Aditi Raghunathan; Pang Wei Koh; Shiori Sagawa; Percy Liang; Chelsea Finn", "abstract": "Standard training via empirical risk minimization (ERM) can produce models that achieve low error on average but high error on minority groups, especially in the presence of spurious correlations between the input and label. 
Prior approaches to this problem, like group distributionally robust optimization (group DRO), generally require group annotations for every training point. On the other hand, approaches that do not use group annotations generally do not improve minority performance. For example, we find that joint DRO, which dynamically upweights examples with high training loss, tends to optimize for examples that are irrelevant to the specific groups we seek to do well on. In this paper, we propose a simple two-stage approach, JTT, that achieves comparable performance to group DRO while only requiring group annotations on a significantly smaller validation set. JTT first attempts to identify informative training examples, which are often minority examples, by training an initial ERM classifier and selecting the examples with high training loss. Then, it trains a final classifier by upsampling the selected examples. Crucially, unlike joint DRO, JTT does not iteratively upsample examples that have high loss under the final classifier. On four image classification and natural language processing tasks with spurious correlations, we show that JTT closes 85% of the gap in accuracy on the worst group between ERM and group DRO.", "bibtex": "@InProceedings{pmlr-v139-liu21f,\n title = \t {Just Train Twice: Improving Group Robustness without Training Group Information},\n author = {Liu, Evan Z and Haghgoo, Behzad and Chen, Annie S and Raghunathan, Aditi and Koh, Pang Wei and Sagawa, Shiori and Liang, Percy and Finn, Chelsea},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6781--6792},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21f/liu21f.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21f.html},\n abstract = \t {Standard training via empirical risk minimization (ERM) can produce models that achieve low error on average but high error on minority groups, especially in the presence of spurious correlations between the input and label. Prior approaches to this problem, like group distributionally robust optimization (group DRO), generally require group annotations for every training point. On the other hand, approaches that do not use group annotations generally do not improve minority performance. For example, we find that joint DRO, which dynamically upweights examples with high training loss, tends to optimize for examples that are irrelevant to the specific groups we seek to do well on. In this paper, we propose a simple two-stage approach, JTT, that achieves comparable performance to group DRO while only requiring group annotations on a significantly smaller validation set. JTT first attempts to identify informative training examples, which are often minority examples, by training an initial ERM classifier and selecting the examples with high training loss. Then, it trains a final classifier by upsampling the selected examples. Crucially, unlike joint DRO, JTT does not iteratively upsample examples that have high loss under the final classifier. 
On four image classification and natural language processing tasks with spurious correlations, we show that JTT closes 85% of the gap in accuracy on the worst group between ERM and group DRO.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21f/liu21f.pdf", "supp": "", "pdf_size": 1931515, "gs_citation": 630, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13173846618257909762&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Department of Computer Science, Stanford University", "aff_domain": "cs.stanford.edu; ; ; ; ; ; ; ", "email": "cs.stanford.edu; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/liu21f.html", "aff_unique_index": "0;0;0;0;0;0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0;0;0;0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "K-shot NAS: Learnable Weight-Sharing for NAS with K-shot Supernets", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10731", "id": "10731", "proceeding": "http://proceedings.mlr.press/v139/su21a.html", "slides": "", "author_site": "Xiu Su, Shan You, Mingkai Zheng, Fei Wang, Chen Qian, Changshui Zhang, Chang Xu", "author": "Xiu Su; Shan You; Mingkai Zheng; Fei Wang; Chen Qian; Changshui Zhang; Chang Xu", "abstract": "In one-shot weight sharing for NAS, the weights of each operation (at each layer) are supposed to be identical for all architectures (paths) in the supernet. However, this rules out the possibility of adjusting operation weights to cater for different paths, which limits the reliability of the evaluation results. In this paper, instead of counting on a single supernet, we introduce $K$-shot supernets and take their weights for each operation as a dictionary. The operation weight for each path is represented as a convex combination of items in a dictionary with a simplex code. This enables a matrix approximation of the stand-alone weight matrix with a higher rank ($K>1$). A \\textit{simplex-net} is introduced to produce architecture-customized code for each path. As a result, all paths can adaptively learn how to share weights in the $K$-shot supernets and acquire corresponding weights for better evaluation. $K$-shot supernets and simplex-net can be iteratively trained, and we further extend the search to the channel dimension. 
Extensive experiments on benchmark datasets validate that K-shot NAS significantly improves the evaluation accuracy of paths and thus brings in impressive performance improvements.", "bibtex": "@InProceedings{pmlr-v139-su21a,\n title = \t {K-shot NAS: Learnable Weight-Sharing for NAS with K-shot Supernets},\n author = {Su, Xiu and You, Shan and Zheng, Mingkai and Wang, Fei and Qian, Chen and Zhang, Changshui and Xu, Chang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9880--9890},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/su21a/su21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/su21a.html},\n abstract = \t {In one-shot weight sharing for NAS, the weights of each operation (at each layer) are supposed to be identical for all architectures (paths) in the supernet. However, this rules out the possibility of adjusting operation weights to cater for different paths, which limits the reliability of the evaluation results. In this paper, instead of counting on a single supernet, we introduce $K$-shot supernets and take their weights for each operation as a dictionary. The operation weight for each path is represented as a convex combination of items in a dictionary with a simplex code. This enables a matrix approximation of the stand-alone weight matrix with a higher rank ($K>1$). A \\textit{simplex-net} is introduced to produce architecture-customized code for each path. As a result, all paths can adaptively learn how to share weights in the $K$-shot supernets and acquire corresponding weights for better evaluation. $K$-shot supernets and simplex-net can be iteratively trained, and we further extend the search to the channel dimension. 
Extensive experiments on benchmark datasets validate that K-shot NAS significantly improves the evaluation accuracy of paths and thus brings in impressive performance improvements.}\n}", "pdf": "http://proceedings.mlr.press/v139/su21a/su21a.pdf", "supp": "", "pdf_size": 500634, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6580774219763014138&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "School of Computer Science, Faculty of Engineering, The University of Sydney, Australia+SenseTime Research; SenseTime Research+Department of Automation, Tsinghua University, Institute for Arti\ufb01cial Intelligence, Tsinghua University (THUAI), Beijing National Research Center for Information Science and Technology (BNRist); Department of Automation, Tsinghua University, Institute for Arti\ufb01cial Intelligence, Tsinghua University (THUAI), Beijing National Research Center for Information Science and Technology (BNRist); Department of Automation, Tsinghua University, Institute for Arti\ufb01cial Intelligence, Tsinghua University (THUAI), Beijing National Research Center for Information Science and Technology (BNRist); Department of Automation, Tsinghua University, Institute for Arti\ufb01cial Intelligence, Tsinghua University (THUAI), Beijing National Research Center for Information Science and Technology (BNRist); Department of Automation, Tsinghua University, Institute for Arti\ufb01cial Intelligence, Tsinghua University (THUAI), Beijing National Research Center for Information Science and Technology (BNRist); School of Computer Science, Faculty of Engineering, The University of Sydney, Australia", "aff_domain": ";sensetime.com; ; ; ; ; ", "email": ";sensetime.com; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/su21a.html", "aff_unique_index": "0+1;1;0", "aff_unique_norm": "University of Sydney;SenseTime;", "aff_unique_dep": "School of Computer Science;SenseTime Research;", "aff_unique_url": "https://www.sydney.edu.au;https://www.sensetime.com;", "aff_unique_abbr": "USYD;SenseTime;", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+1;1;0", "aff_country_unique": "Australia;China;" }, { "title": "KD3A: Unsupervised Multi-Source Decentralized Domain Adaptation via Knowledge Distillation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9025", "id": "9025", "proceeding": "http://proceedings.mlr.press/v139/feng21f.html", "slides": "/media/icml-2021/Slides/9025.pdf", "author_site": "Haozhe Feng, Zhaoyang You, Minghao Chen, Tianye Zhang, Minfeng Zhu, Fei Wu, Chao Wu, Wei Chen", "author": "Haozhe Feng; Zhaoyang You; Minghao Chen; Tianye Zhang; Minfeng Zhu; Fei Wu; Chao Wu; Wei Chen", "abstract": "Conventional unsupervised multi-source domain adaptation (UMDA) methods assume all source domains can be accessed directly. However, this assumption neglects the privacy-preserving policy, where all the data and computations must be kept decentralized. There exist three challenges in this scenario: (1) Minimizing the domain distance requires the pairwise calculation of the data from the source and target domains, while the data on the source domain is not available. (2) The communication cost and privacy security limit the application of existing UMDA methods, such as the domain adversarial training. 
(3) Since users cannot govern the data quality, the irrelevant or malicious source domains are more likely to appear, which causes negative transfer. To address the above problems, we propose a privacy-preserving UMDA paradigm named Knowledge Distillation based Decentralized Domain Adaptation (KD3A), which performs domain adaptation through the knowledge distillation on models from different source domains. The extensive experiments show that KD3A significantly outperforms state-of-the-art UMDA approaches. Moreover, the KD3A is robust to the negative transfer and brings a 100x reduction of communication cost compared with other decentralized UMDA methods.", "bibtex": "@InProceedings{pmlr-v139-feng21f,\n title = \t {KD3A: Unsupervised Multi-Source Decentralized Domain Adaptation via Knowledge Distillation},\n author = {Feng, Haozhe and You, Zhaoyang and Chen, Minghao and Zhang, Tianye and Zhu, Minfeng and Wu, Fei and Wu, Chao and Chen, Wei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3274--3283},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/feng21f/feng21f.pdf},\n url = \t {https://proceedings.mlr.press/v139/feng21f.html},\n abstract = \t {Conventional unsupervised multi-source domain adaptation (UMDA) methods assume all source domains can be accessed directly. However, this assumption neglects the privacy-preserving policy, where all the data and computations must be kept decentralized. There exist three challenges in this scenario: (1) Minimizing the domain distance requires the pairwise calculation of the data from the source and target domains, while the data on the source domain is not available. (2) The communication cost and privacy security limit the application of existing UMDA methods, such as the domain adversarial training. (3) Since users cannot govern the data quality, the irrelevant or malicious source domains are more likely to appear, which causes negative transfer. To address the above problems, we propose a privacy-preserving UMDA paradigm named Knowledge Distillation based Decentralized Domain Adaptation (KD3A), which performs domain adaptation through the knowledge distillation on models from different source domains. The extensive experiments show that KD3A significantly outperforms state-of-the-art UMDA approaches. 
Moreover, the KD3A is robust to the negative transfer and brings a 100x reduction of communication cost compared with other decentralized UMDA methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/feng21f/feng21f.pdf", "supp": "", "pdf_size": 7297110, "gs_citation": 135, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14984342689086286396&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China; College of Computer Science and Technology, Zhejiang University, Hangzhou, China; State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China; State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China; State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China; College of Computer Science and Technology, Zhejiang University, Hangzhou, China; School of Public Affairs, Zhejiang University, Hangzhou, China; State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "aff_domain": "; ; ; ; ; ; ;zju.edu.cn", "email": "; ; ; ; ; ; ;zju.edu.cn", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/feng21f.html", "aff_unique_index": "0;0;0;0;0;0;0;0", "aff_unique_norm": "Zhejiang University", "aff_unique_dep": "State Key Lab of CAD&CG", "aff_unique_url": "http://www.zju.edu.cn", "aff_unique_abbr": "ZJU", "aff_campus_unique_index": "0;0;0;0;0;0;0;0", "aff_campus_unique": "Hangzhou", "aff_country_unique_index": "0;0;0;0;0;0;0;0", "aff_country_unique": "China" }, { "title": "KNAS: Green Neural Architecture Search", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9229", "id": "9229", "proceeding": "http://proceedings.mlr.press/v139/xu21m.html", "slides": "", "author_site": "Jingjing Xu, Liang Zhao, Junyang Lin, Rundong Gao, Xu SUN, Hongxia Yang", "author": "Jingjing Xu; Liang Zhao; Junyang Lin; Rundong Gao; Xu Sun; Hongxia Yang", "abstract": "Many existing neural architecture search (NAS) solutions rely on downstream training for architecture evaluation, which takes enormous computations. Considering that these computations bring a large carbon footprint, this paper aims to explore a green (namely environmental-friendly) NAS solution that evaluates architectures without training. Intuitively, gradients, induced by the architecture itself, directly decide the convergence and generalization results. It motivates us to propose the gradient kernel hypothesis: Gradients can be used as a coarse-grained proxy of downstream training to evaluate random-initialized networks. To support the hypothesis, we conduct a theoretical analysis and find a practical gradient kernel that has good correlations with training loss and validation performance. According to this hypothesis, we propose a new kernel based architecture search approach KNAS. Experiments show that KNAS achieves competitive results with orders of magnitude faster than \u201ctrain-then-test\u201d paradigms on image classification tasks. Furthermore, the extremely low search cost enables its wide applications. 
The searched network also outperforms strong baseline RoBERTA-large on two text classification tasks.", "bibtex": "@InProceedings{pmlr-v139-xu21m,\n title = \t {KNAS: Green Neural Architecture Search},\n author = {Xu, Jingjing and Zhao, Liang and Lin, Junyang and Gao, Rundong and Sun, Xu and Yang, Hongxia},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11613--11625},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xu21m/xu21m.pdf},\n url = \t {https://proceedings.mlr.press/v139/xu21m.html},\n abstract = \t {Many existing neural architecture search (NAS) solutions rely on downstream training for architecture evaluation, which takes enormous computations. Considering that these computations bring a large carbon footprint, this paper aims to explore a green (namely environmental-friendly) NAS solution that evaluates architectures without training. Intuitively, gradients, induced by the architecture itself, directly decide the convergence and generalization results. It motivates us to propose the gradient kernel hypothesis: Gradients can be used as a coarse-grained proxy of downstream training to evaluate random-initialized networks. To support the hypothesis, we conduct a theoretical analysis and find a practical gradient kernel that has good correlations with training loss and validation performance. According to this hypothesis, we propose a new kernel based architecture search approach KNAS. Experiments show that KNAS achieves competitive results with orders of magnitude faster than \u201ctrain-then-test\u201d paradigms on image classification tasks. Furthermore, the extremely low search cost enables its wide applications. 
The searched network also outperforms strong baseline RoBERTA-large on two text classification tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/xu21m/xu21m.pdf", "supp": "", "pdf_size": 2699567, "gs_citation": 85, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=636730090425787241&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "MOE Key Lab of Computational Linguistics, School of EECS, Peking University+Center for Data Science, Peking University; Center for Data Science, Peking University; Alibaba Group; MOE Key Lab of Computational Linguistics, School of EECS, Peking University+Center for Data Science, Peking University; MOE Key Lab of Computational Linguistics, School of EECS, Peking University+Center for Data Science, Peking University; Alibaba Group", "aff_domain": "pku.edu.cn;pku.edu.cn; ; ; ; ", "email": "pku.edu.cn;pku.edu.cn; ; ; ; ", "github": "https://github.com/Jingjing-NLP/KNAS", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/xu21m.html", "aff_unique_index": "0+0;0;1;0+0;0+0;1", "aff_unique_norm": "Peking University;Alibaba Group", "aff_unique_dep": "School of EECS;", "aff_unique_url": "http://www.pku.edu.cn;https://www.alibaba.com", "aff_unique_abbr": "PKU;Alibaba", "aff_campus_unique_index": "1;1;1;1", "aff_campus_unique": ";Beijing", "aff_country_unique_index": "0+0;0;0;0+0;0+0;0", "aff_country_unique": "China" }, { "title": "KO codes: inventing nonlinear encoding and decoding for reliable wireless communication via deep-learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8623", "id": "8623", "proceeding": "http://proceedings.mlr.press/v139/makkuva21a.html", "slides": "/media/icml-2021/Slides/8623.pdf", "author_site": "Ashok Vardhan Makkuva, Xiyang Liu, Mohammad Vahid Jamali, Hessam Mahdavifar, Sewoong Oh, Pramod Viswanath", "author": "Ashok V Makkuva; Xiyang Liu; Mohammad Vahid Jamali; Hessam Mahdavifar; Sewoong Oh; Pramod Viswanath", "abstract": "Landmark codes underpin reliable physical layer communication, e.g., Reed-Muller, BCH, Convolution, Turbo, LDPC, and Polar codes: each is a linear code and represents a mathematical breakthrough. The impact on humanity is huge: each of these codes has been used in global wireless communication standards (satellite, WiFi, cellular). Reliability of communication over the classical additive white Gaussian noise (AWGN) channel enables benchmarking and ranking of the different codes. In this paper, we construct KO codes, a computationally efficient family of deep-learning driven (encoder, decoder) pairs that outperform the state-of-the-art reliability performance on the standardized AWGN channel. KO codes beat state-of-the-art Reed-Muller and Polar codes, under the low-complexity successive cancellation decoding, in the challenging short-to-medium block length regime on the AWGN channel. We show that the gains of KO codes are primarily due to the nonlinear mapping of information bits directly to transmit symbols (bypassing modulation) and yet possess an efficient, high-performance decoder. The key technical innovation that renders this possible is design of a novel family of neural architectures inspired by the computation tree of the {\\bf K}ronecker {\\bf O}peration (KO) central to Reed-Muller and Polar codes. 
These architectures pave way for the discovery of a much richer class of hitherto unexplored nonlinear algebraic structures.", "bibtex": "@InProceedings{pmlr-v139-makkuva21a,\n title = \t {KO codes: inventing nonlinear encoding and decoding for reliable wireless communication via deep-learning},\n author = {Makkuva, Ashok V and Liu, Xiyang and Jamali, Mohammad Vahid and Mahdavifar, Hessam and Oh, Sewoong and Viswanath, Pramod},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7368--7378},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/makkuva21a/makkuva21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/makkuva21a.html},\n abstract = \t {Landmark codes underpin reliable physical layer communication, e.g., Reed-Muller, BCH, Convolution, Turbo, LDPC, and Polar codes: each is a linear code and represents a mathematical breakthrough. The impact on humanity is huge: each of these codes has been used in global wireless communication standards (satellite, WiFi, cellular). Reliability of communication over the classical additive white Gaussian noise (AWGN) channel enables benchmarking and ranking of the different codes. In this paper, we construct KO codes, a computationally efficient family of deep-learning driven (encoder, decoder) pairs that outperform the state-of-the-art reliability performance on the standardized AWGN channel. KO codes beat state-of-the-art Reed-Muller and Polar codes, under the low-complexity successive cancellation decoding, in the challenging short-to-medium block length regime on the AWGN channel. We show that the gains of KO codes are primarily due to the nonlinear mapping of information bits directly to transmit symbols (bypassing modulation) and yet possess an efficient, high-performance decoder. The key technical innovation that renders this possible is design of a novel family of neural architectures inspired by the computation tree of the {\\bf K}ronecker {\\bf O}peration (KO) central to Reed-Muller and Polar codes. These architectures pave way for the discovery of a much richer class of hitherto unexplored nonlinear algebraic structures.}\n}", "pdf": "http://proceedings.mlr.press/v139/makkuva21a/makkuva21a.pdf", "supp": "", "pdf_size": 3098015, "gs_citation": 47, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6409739785381196000&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Electrical and Computer Engineering, University of Illinois at Urbana-Champaign; Paul G. Allen School of Computer Science & Engineering, University of Washington; Department of Electrical Engineerign and Computer Science, University of Michigan; Department of Electrical Engineerign and Computer Science, University of Michigan; Paul G. 
Allen School of Computer Science & Engineering, University of Washington; Department of Electrical and Computer Engineering, University of Illinois at Urbana-Champaign", "aff_domain": "illinois.edu;cs.washington.edu; ; ; ;", "email": "illinois.edu;cs.washington.edu; ; ; ;", "github": "https://github.com/deepcomm/KOcodes", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/makkuva21a.html", "aff_unique_index": "0;1;2;2;1;0", "aff_unique_norm": "University of Illinois Urbana-Champaign;University of Washington;University of Michigan", "aff_unique_dep": "Department of Electrical and Computer Engineering;Paul G. Allen School of Computer Science & Engineering;Department of Electrical Engineering and Computer Science", "aff_unique_url": "https://illinois.edu;https://www.washington.edu;https://www.umich.edu", "aff_unique_abbr": "UIUC;UW;UM", "aff_campus_unique_index": "0;1;2;2;1;0", "aff_campus_unique": "Urbana-Champaign;Seattle;Ann Arbor", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Kernel Continual Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10451", "id": "10451", "proceeding": "http://proceedings.mlr.press/v139/derakhshani21a.html", "slides": "", "author_site": "Mohammad Mahdi Derakhshani, Xiantong Zhen, Ling Shao, Cees Snoek", "author": "Mohammad Mahdi Derakhshani; Xiantong Zhen; Ling Shao; Cees Snoek", "abstract": "This paper introduces kernel continual learning, a simple but effective variant of continual learning that leverages the non-parametric nature of kernel methods to tackle catastrophic forgetting. We deploy an episodic memory unit that stores a subset of samples for each task to learn task-specific classifiers based on kernel ridge regression. This does not require memory replay and systematically avoids task interference in the classifiers. We further introduce variational random features to learn a data-driven kernel for each task. To do so, we formulate kernel continual learning as a variational inference problem, where a random Fourier basis is incorporated as the latent variable. The variational posterior distribution over the random Fourier basis is inferred from the coreset of each task. In this way, we are able to generate more informative kernels specific to each task, and, more importantly, the coreset size can be reduced to achieve more compact memory, resulting in more efficient continual learning based on episodic memory. Extensive evaluation on four benchmarks demonstrates the effectiveness and promise of kernels for continual learning.", "bibtex": "@InProceedings{pmlr-v139-derakhshani21a,\n title = \t {Kernel Continual Learning},\n author = {Derakhshani, Mohammad Mahdi and Zhen, Xiantong and Shao, Ling and Snoek, Cees},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2621--2631},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/derakhshani21a/derakhshani21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/derakhshani21a.html},\n abstract = \t {This paper introduces kernel continual learning, a simple but effective variant of continual learning that leverages the non-parametric nature of kernel methods to tackle catastrophic forgetting. 
We deploy an episodic memory unit that stores a subset of samples for each task to learn task-specific classifiers based on kernel ridge regression. This does not require memory replay and systematically avoids task interference in the classifiers. We further introduce variational random features to learn a data-driven kernel for each task. To do so, we formulate kernel continual learning as a variational inference problem, where a random Fourier basis is incorporated as the latent variable. The variational posterior distribution over the random Fourier basis is inferred from the coreset of each task. In this way, we are able to generate more informative kernels specific to each task, and, more importantly, the coreset size can be reduced to achieve more compact memory, resulting in more efficient continual learning based on episodic memory. Extensive evaluation on four benchmarks demonstrates the effectiveness and promise of kernels for continual learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/derakhshani21a/derakhshani21a.pdf", "supp": "", "pdf_size": 548836, "gs_citation": 47, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16309190237334513251&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "AIM Lab, University of Amsterdam, The Netherlands+Inception Institute of Arti\ufb01cial Intelligence, UAE; AIM Lab, University of Amsterdam, The Netherlands+Inception Institute of Arti\ufb01cial Intelligence, UAE; Inception Institute of Arti\ufb01cial Intelligence, UAE; AIM Lab, University of Amsterdam, The Netherlands", "aff_domain": "uva.nl;uva.nl; ; ", "email": "uva.nl;uva.nl; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/derakhshani21a.html", "aff_unique_index": "0+1;0+1;1;0", "aff_unique_norm": "University of Amsterdam;Inception Institute of Artificial Intelligence", "aff_unique_dep": "AIM Lab;", "aff_unique_url": "https://www.uva.nl;https://www.inceptioniai.org", "aff_unique_abbr": "UvA;", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+1;0+1;1;0", "aff_country_unique": "Netherlands;United Arab Emirates" }, { "title": "Kernel Stein Discrepancy Descent", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9273", "id": "9273", "proceeding": "http://proceedings.mlr.press/v139/korba21a.html", "slides": "/media/icml-2021/Slides/9273.pdf", "author_site": "Anna Korba, Pierre-Cyril Aubin-Frankowski, Szymon Majewski, Pierre Ablin", "author": "Anna Korba; Pierre-Cyril Aubin-Frankowski; Szymon Majewski; Pierre Ablin", "abstract": "Among dissimilarities between probability distributions, the Kernel Stein Discrepancy (KSD) has received much interest recently. We investigate the properties of its Wasserstein gradient flow to approximate a target probability distribution $\\pi$ on $\\mathbb{R}^d$, known up to a normalization constant. This leads to a straightforwardly implementable, deterministic score-based method to sample from $\\pi$, named KSD Descent, which uses a set of particles to approximate $\\pi$. Remarkably, owing to a tractable loss function, KSD Descent can leverage robust parameter-free optimization schemes such as L-BFGS; this contrasts with other popular particle-based schemes such as the Stein Variational Gradient Descent algorithm. We study the convergence properties of KSD Descent and demonstrate its practical relevance. 
However, we also highlight failure cases by showing that the algorithm can get stuck in spurious local minima.", "bibtex": "@InProceedings{pmlr-v139-korba21a,\n title = \t {Kernel Stein Discrepancy Descent},\n author = {Korba, Anna and Aubin-Frankowski, Pierre-Cyril and Majewski, Szymon and Ablin, Pierre},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5719--5730},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/korba21a/korba21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/korba21a.html},\n abstract = \t {Among dissimilarities between probability distributions, the Kernel Stein Discrepancy (KSD) has received much interest recently. We investigate the properties of its Wasserstein gradient flow to approximate a target probability distribution $\\pi$ on $\\mathbb{R}^d$, known up to a normalization constant. This leads to a straightforwardly implementable, deterministic score-based method to sample from $\\pi$, named KSD Descent, which uses a set of particles to approximate $\\pi$. Remarkably, owing to a tractable loss function, KSD Descent can leverage robust parameter-free optimization schemes such as L-BFGS; this contrasts with other popular particle-based schemes such as the Stein Variational Gradient Descent algorithm. We study the convergence properties of KSD Descent and demonstrate its practical relevance. However, we also highlight failure cases by showing that the algorithm can get stuck in spurious local minima.}\n}", "pdf": "http://proceedings.mlr.press/v139/korba21a/korba21a.pdf", "supp": "", "pdf_size": 1565573, "gs_citation": 58, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5389096233704622104&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "CREST, ENSAE, Institut Polytechnique de Paris; CAS, MINES ParisTech, Paris, France; CMAP, Ecole Polytechnique, Institut Polytechnique de Paris; CNRS and DMA, Ecole Normale Sup\u00e9rieure, Paris, France", "aff_domain": "ensae.fr; ; ; ", "email": "ensae.fr; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/korba21a.html", "aff_unique_index": "0;1;2;3", "aff_unique_norm": "Institut Polytechnique de Paris;MINES ParisTech;Ecole Polytechnique;Ecole Normale Sup\u00e9rieure", "aff_unique_dep": "CREST;CAS;CMAP;CNRS and DMA", "aff_unique_url": "https://www.ipparis.fr;https://www.minesparistech.fr;https://www.ecolepolytechnique.fr;https://www.ens.fr", "aff_unique_abbr": "IP Paris;MPT;Polytechnique;ENS", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Paris", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "France" }, { "title": "Kernel-Based Reinforcement Learning: A Finite-Time Analysis", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8573", "id": "8573", "proceeding": "http://proceedings.mlr.press/v139/domingues21a.html", "slides": "", "author_site": "Omar Darwiche Domingues, Pierre Menard, Matteo Pirotta, Emilie Kaufmann, Michal Valko", "author": "Omar Darwiche Domingues; Pierre Menard; Matteo Pirotta; Emilie Kaufmann; Michal Valko", "abstract": "We consider the exploration-exploitation dilemma in finite-horizon reinforcement learning problems whose state-action space is endowed with a metric. 
We introduce Kernel-UCBVI, a model-based optimistic algorithm that leverages the smoothness of the MDP and a non-parametric kernel estimator of the rewards and transitions to efficiently balance exploration and exploitation. For problems with $K$ episodes and horizon $H$, we provide a regret bound of $\\widetilde{O}\\left( H^3 K^{\\frac{2d}{2d+1}}\\right)$, where $d$ is the covering dimension of the joint state-action space. This is the first regret bound for kernel-based RL using smoothing kernels, which requires very weak assumptions on the MDP and applies to a wide range of tasks. We empirically validate our approach in continuous MDPs with sparse rewards.", "bibtex": "@InProceedings{pmlr-v139-domingues21a,\n title = \t {Kernel-Based Reinforcement Learning: A Finite-Time Analysis},\n author = {Domingues, Omar Darwiche and Menard, Pierre and Pirotta, Matteo and Kaufmann, Emilie and Valko, Michal},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2783--2792},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/domingues21a/domingues21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/domingues21a.html},\n abstract = \t {We consider the exploration-exploitation dilemma in finite-horizon reinforcement learning problems whose state-action space is endowed with a metric. We introduce Kernel-UCBVI, a model-based optimistic algorithm that leverages the smoothness of the MDP and a non-parametric kernel estimator of the rewards and transitions to efficiently balance exploration and exploitation. For problems with $K$ episodes and horizon $H$, we provide a regret bound of $\\widetilde{O}\\left( H^3 K^{\\frac{2d}{2d+1}}\\right)$, where $d$ is the covering dimension of the joint state-action space. This is the first regret bound for kernel-based RL using smoothing kernels, which requires very weak assumptions on the MDP and applies to a wide range of tasks. 
We empirically validate our approach in continuous MDPs with sparse rewards.}\n}", "pdf": "http://proceedings.mlr.press/v139/domingues21a/domingues21a.pdf", "supp": "", "pdf_size": 5325484, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1350124438767928735&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Inria Lille+Universit\u00e9 de Lille; Otto von Guericke University; Facebook AI Research, Paris; Inria Lille+CNRS; Inria Lille+DeepMind Paris", "aff_domain": "inria.fr; ; ; ; ", "email": "inria.fr; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/domingues21a.html", "aff_unique_index": "0+1;2;3;0+4;0+5", "aff_unique_norm": "INRIA;Universit\u00e9 de Lille;Otto von Guericke University Magdeburg;Meta;Centre National de la Recherche Scientifique;DeepMind", "aff_unique_dep": ";;;Facebook AI Research;;", "aff_unique_url": "https://www.inria.fr;https://www.univ-lille.fr;https://www.ovgu.de;https://research.facebook.com;https://www.cnrs.fr;https://deepmind.com", "aff_unique_abbr": "Inria;UdeL;OVGU;FAIR;CNRS;DeepMind", "aff_campus_unique_index": "0;2;0;0+2", "aff_campus_unique": "Lille;;Paris", "aff_country_unique_index": "0+0;1;0;0+0;0+0", "aff_country_unique": "France;Germany" }, { "title": "Keyframe-Focused Visual Imitation Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10701", "id": "10701", "proceeding": "http://proceedings.mlr.press/v139/wen21d.html", "slides": "", "author_site": "Chuan Wen, Jierui Lin, Jianing Qian, Yang Gao, Dinesh Jayaraman", "author": "Chuan Wen; Jierui Lin; Jianing Qian; Yang Gao; Dinesh Jayaraman", "abstract": "Imitation learning trains control policies by mimicking pre-recorded expert demonstrations. In partially observable settings, imitation policies must rely on observation histories, but many seemingly paradoxical results show better performance for policies that only access the most recent observation. Recent solutions ranging from causal graph learning to deep information bottlenecks have shown promising results, but failed to scale to realistic settings such as visual imitation. We propose a solution that outperforms these prior approaches by upweighting demonstration keyframes corresponding to expert action changepoints. This simple approach easily scales to complex visual imitation settings. Our experimental results demonstrate consistent performance improvements over all baselines on image-based Gym MuJoCo continuous control tasks. Finally, on the CARLA photorealistic vision-based urban driving simulator, we resolve a long-standing issue in behavioral cloning for driving by demonstrating effective imitation from observation histories. 
Supplementary materials and code at: \\url{https://tinyurl.com/imitation-keyframes}.", "bibtex": "@InProceedings{pmlr-v139-wen21d,\n title = \t {Keyframe-Focused Visual Imitation Learning},\n author = {Wen, Chuan and Lin, Jierui and Qian, Jianing and Gao, Yang and Jayaraman, Dinesh},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11123--11133},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wen21d/wen21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/wen21d.html},\n abstract = \t {Imitation learning trains control policies by mimicking pre-recorded expert demonstrations. In partially observable settings, imitation policies must rely on observation histories, but many seemingly paradoxical results show better performance for policies that only access the most recent observation. Recent solutions ranging from causal graph learning to deep information bottlenecks have shown promising results, but failed to scale to realistic settings such as visual imitation. We propose a solution that outperforms these prior approaches by upweighting demonstration keyframes corresponding to expert action changepoints. This simple approach easily scales to complex visual imitation settings. Our experimental results demonstrate consistent performance improvements over all baselines on image-based Gym MuJoCo continuous control tasks. Finally, on the CARLA photorealistic vision-based urban driving simulator, we resolve a long-standing issue in behavioral cloning for driving by demonstrating effective imitation from observation histories. Supplementary materials and code at: \\url{https://tinyurl.com/imitation-keyframes}.}\n}", "pdf": "http://proceedings.mlr.press/v139/wen21d/wen21d.pdf", "supp": "", "pdf_size": 9395751, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15887131149800456429&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 5, "aff": ";;;;", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "https://tinyurl.com/imitation-keyframes", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/wen21d.html" }, { "title": "Knowledge Enhanced Machine Learning Pipeline against Diverse Adversarial Attacks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9789", "id": "9789", "proceeding": "http://proceedings.mlr.press/v139/gurel21a.html", "slides": "/media/icml-2021/Slides/9789.pdf", "author_site": "Nezihe Merve G\u00fcrel, Xiangyu Qi, Luka Rimanic, Ce Zhang, Bo Li", "author": "Nezihe Merve G\u00fcrel; Xiangyu Qi; Luka Rimanic; Ce Zhang; Bo Li", "abstract": "Despite the great successes achieved by deep neural networks (DNNs), recent studies show that they are vulnerable against adversarial examples, which aim to mislead DNNs by adding small adversarial perturbations. Several defenses have been proposed against such attacks, while many of them have been adaptively attacked. In this work, we aim to enhance the ML robustness from a different perspective by leveraging domain knowledge: We propose a Knowledge Enhanced Machine Learning Pipeline (KEMLP) to integrate domain knowledge (i.e., logic relationships among different predictions) into a probabilistic graphical model via first-order logic rules. 
In particular, we develop KEMLP by integrating a diverse set of weak auxiliary models based on their logical relationships to the main DNN model that performs the target task. Theoretically, we provide convergence results and prove that, under mild conditions, the prediction of KEMLP is more robust than that of the main DNN model. Empirically, we take road sign recognition as an example and leverage the relationships between road signs and their shapes and contents as domain knowledge. We show that compared with adversarial training and other baselines, KEMLP achieves higher robustness against physical attacks, $\\mathcal{L}_p$ bounded attacks, unforeseen attacks, and natural corruptions under both whitebox and blackbox settings, while still maintaining high clean accuracy.", "bibtex": "@InProceedings{pmlr-v139-gurel21a,\n title = \t {Knowledge Enhanced Machine Learning Pipeline against Diverse Adversarial Attacks},\n author = {G{\\\"u}rel, Nezihe Merve and Qi, Xiangyu and Rimanic, Luka and Zhang, Ce and Li, Bo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3976--3987},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gurel21a/gurel21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/gurel21a.html},\n abstract = \t {Despite the great successes achieved by deep neural networks (DNNs), recent studies show that they are vulnerable against adversarial examples, which aim to mislead DNNs by adding small adversarial perturbations. Several defenses have been proposed against such attacks, while many of them have been adaptively attacked. In this work, we aim to enhance the ML robustness from a different perspective by leveraging domain knowledge: We propose a Knowledge Enhanced Machine Learning Pipeline (KEMLP) to integrate domain knowledge (i.e., logic relationships among different predictions) into a probabilistic graphical model via first-order logic rules. In particular, we develop KEMLP by integrating a diverse set of weak auxiliary models based on their logical relationships to the main DNN model that performs the target task. Theoretically, we provide convergence results and prove that, under mild conditions, the prediction of KEMLP is more robust than that of the main DNN model. Empirically, we take road sign recognition as an example and leverage the relationships between road signs and their shapes and contents as domain knowledge. 
We show that compared with adversarial training and other baselines, KEMLP achieves higher robustness against physical attacks, $\\mathcal{L}_p$ bounded attacks, unforeseen attacks, and natural corruptions under both whitebox and blackbox settings, while still maintaining high clean accuracy.}\n}", "pdf": "http://proceedings.mlr.press/v139/gurel21a/gurel21a.pdf", "supp": "", "pdf_size": 842875, "gs_citation": 51, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7636701886743640050&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "ETH Zurich, Zurich, Switzerland; Zhejiang University, China (work done during remote internship at UIUC) + ETH Zurich, Zurich, Switzerland; ETH Zurich, Zurich, Switzerland; ETH Zurich, Zurich, Switzerland; University of Illinois at Urbana-Champaign, Illinois, USA", "aff_domain": "inf.ethz.ch;zju.edu.cn; ;inf.ethz.ch;illinois.edu", "email": "inf.ethz.ch;zju.edu.cn; ;inf.ethz.ch;illinois.edu", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/gurel21a.html", "aff_unique_index": "0;1+0;0;0;2", "aff_unique_norm": "ETH Zurich;Zhejiang University;University of Illinois Urbana-Champaign", "aff_unique_dep": ";;", "aff_unique_url": "https://www.ethz.ch;http://www.zju.edu.cn;https://illinois.edu", "aff_unique_abbr": "ETHZ;ZJU;UIUC", "aff_campus_unique_index": "0;0;0;0;2", "aff_campus_unique": "Zurich;;Urbana-Champaign", "aff_country_unique_index": "0;1+0;0;0;2", "aff_country_unique": "Switzerland;China;United States" }, { "title": "LAMDA: Label Matching Deep Domain Adaptation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10721", "id": "10721", "proceeding": "http://proceedings.mlr.press/v139/le21a.html", "slides": "", "author_site": "Trung Le, Tuan Nguyen, Nhat Ho, Hung Bui, Dinh Phung", "author": "Trung Le; Tuan Nguyen; Nhat Ho; Hung Bui; Dinh Phung", "abstract": "Deep domain adaptation (DDA) approaches have recently been shown to perform better than their shallow rivals with better modeling capacity on complex domains (e.g., image, structural data, and sequential data). The underlying idea is to learn domain invariant representations on a latent space that can bridge the gap between source and target domains. Several theoretical studies have established insightful understanding and the benefit of learning domain invariant features; however, they are usually limited to the case where there is no label shift, hence hindering its applicability. In this paper, we propose and study a new challenging setting that allows us to use a Wasserstein distance (WS) to not only quantify the data shift but also to define the label shift directly. We further develop a theory to demonstrate that minimizing the WS of the data shift leads to closing the gap between the source and target data distributions on the latent space (e.g., an intermediate layer of a deep net), while still being able to quantify the label shift with respect to this latent space. Interestingly, our theory can consequently explain certain drawbacks of learning domain invariant features on the latent space. 
Finally, grounded on the results and guidance of our developed theory, we propose the Label Matching Deep Domain Adaptation (LAMDA) approach that outperforms baselines on real-world datasets for DA problems.", "bibtex": "@InProceedings{pmlr-v139-le21a,\n title = \t {LAMDA: Label Matching Deep Domain Adaptation},\n author = {Le, Trung and Nguyen, Tuan and Ho, Nhat and Bui, Hung and Phung, Dinh},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6043--6054},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/le21a/le21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/le21a.html},\n abstract = \t {Deep domain adaptation (DDA) approaches have recently been shown to perform better than their shallow rivals with better modeling capacity on complex domains (e.g., image, structural data, and sequential data). The underlying idea is to learn domain invariant representations on a latent space that can bridge the gap between source and target domains. Several theoretical studies have established insightful understanding and the benefit of learning domain invariant features; however, they are usually limited to the case where there is no label shift, hence hindering its applicability. In this paper, we propose and study a new challenging setting that allows us to use a Wasserstein distance (WS) to not only quantify the data shift but also to define the label shift directly. We further develop a theory to demonstrate that minimizing the WS of the data shift leads to closing the gap between the source and target data distributions on the latent space (e.g., an intermediate layer of a deep net), while still being able to quantify the label shift with respect to this latent space. Interestingly, our theory can consequently explain certain drawbacks of learning domain invariant features on the latent space. 
Finally, grounded on the results and guidance of our developed theory, we propose the Label Matching Deep Domain Adaptation (LAMDA) approach that outperforms baselines on real-world datasets for DA problems.}\n}", "pdf": "http://proceedings.mlr.press/v139/le21a/le21a.pdf", "supp": "", "pdf_size": 1109134, "gs_citation": 64, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1031422864725798765&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Department of Data Science and AI, Monash University, Australia; Department of Data Science and AI, Monash University, Australia; University of Texas, Austin, USA; VinAI Research, Vietnam; Department of Data Science and AI, Monash University, Australia + VinAI Research, Vietnam", "aff_domain": "monash.edu; ; ; ; ", "email": "monash.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/le21a.html", "aff_unique_index": "0;0;1;2;0+2", "aff_unique_norm": "Monash University;University of Texas at Austin;VinAI Research", "aff_unique_dep": "Department of Data Science and AI;;", "aff_unique_url": "https://www.monash.edu;https://www.utexas.edu;https://www.vin.ai", "aff_unique_abbr": "Monash;UT Austin;VinAI", "aff_campus_unique_index": "1;", "aff_campus_unique": ";Austin", "aff_country_unique_index": "0;0;1;2;0+2", "aff_country_unique": "Australia;United States;Vietnam" }, { "title": "LARNet: Lie Algebra Residual Network for Face Recognition", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9233", "id": "9233", "proceeding": "http://proceedings.mlr.press/v139/yang21d.html", "slides": "/media/icml-2021/Slides/9233.pdf", "author_site": "Xiaolong Yang, Xiaohong Jia, Dihong Gong, Dong-Ming Yan, Zhifeng Li, Wei Liu", "author": "Xiaolong Yang; Xiaohong Jia; Dihong Gong; Dong-Ming Yan; Zhifeng Li; Wei Liu", "abstract": "Face recognition is an important yet challenging problem in computer vision. A major challenge in practical face recognition applications lies in significant variations between profile and frontal faces. Traditional techniques address this challenge either by synthesizing frontal faces or by pose invariant learning. In this paper, we propose a novel method with Lie algebra theory to explore how face rotation in the 3D space affects the deep feature generation process of convolutional neural networks (CNNs). We prove that face rotation in the image space is equivalent to an additive residual component in the feature space of CNNs, which is determined solely by the rotation. Based on this theoretical finding, we further design a Lie Algebraic Residual Network (LARNet) for tackling pose robust face recognition. Our LARNet consists of a residual subnet for decoding rotation information from input face images, and a gating subnet to learn rotation magnitude for controlling the strength of the residual component contributing to the feature learning process. 
Comprehensive experimental evaluations on both frontal-profile face datasets and general face recognition datasets convincingly demonstrate that our method consistently outperforms the state-of-the-art ones.", "bibtex": "@InProceedings{pmlr-v139-yang21d,\n title = \t {LARNet: Lie Algebra Residual Network for Face Recognition},\n author = {Yang, Xiaolong and Jia, Xiaohong and Gong, Dihong and Yan, Dong-Ming and Li, Zhifeng and Liu, Wei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11738--11750},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yang21d/yang21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/yang21d.html},\n abstract = \t {Face recognition is an important yet challenging problem in computer vision. A major challenge in practical face recognition applications lies in significant variations between profile and frontal faces. Traditional techniques address this challenge either by synthesizing frontal faces or by pose invariant learning. In this paper, we propose a novel method with Lie algebra theory to explore how face rotation in the 3D space affects the deep feature generation process of convolutional neural networks (CNNs). We prove that face rotation in the image space is equivalent to an additive residual component in the feature space of CNNs, which is determined solely by the rotation. Based on this theoretical finding, we further design a Lie Algebraic Residual Network (LARNet) for tackling pose robust face recognition. Our LARNet consists of a residual subnet for decoding rotation information from input face images, and a gating subnet to learn rotation magnitude for controlling the strength of the residual component contributing to the feature learning process. 
Comprehensive experimental evaluations on both frontal-profile face datasets and general face recognition datasets convincingly demonstrate that our method consistently outperforms the state-of-the-art ones.}\n}", "pdf": "http://proceedings.mlr.press/v139/yang21d/yang21d.pdf", "supp": "", "pdf_size": 2098057, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16559921103444475954&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Academy of Mathematics and Systems Science, Chinese Academy of Sciences, Beijing, P.R.China+University of Chinese Academy of Sciences, Beijing, P.R.China+Tencent Data Platform, P.R.China; Academy of Mathematics and Systems Science, Chinese Academy of Sciences, Beijing, P.R.China+University of Chinese Academy of Sciences, Beijing, P.R.China; Tencent Data Platform, P.R.China; NLPR, Institute of Automation, Chinese Academy of Sciences, Beijing, P.R.China+University of Chinese Academy of Sciences, Beijing, P.R.China; Tencent Data Platform, P.R.China; Tencent Data Platform, P.R.China", "aff_domain": "amss.ac.cn;tencent.com;columbia.edu; ; ; ", "email": "amss.ac.cn;tencent.com;columbia.edu; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/yang21d.html", "aff_unique_index": "0+1+2;0+1;2;0+1;2;2", "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Tencent", "aff_unique_dep": "Academy of Mathematics and Systems Science;;Tencent Data Platform", "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn;https://www.tencent.com", "aff_unique_abbr": "CAS;UCAS;Tencent", "aff_campus_unique_index": "0+0;0+0;0+0", "aff_campus_unique": "Beijing;", "aff_country_unique_index": "0+0+0;0+0;0;0+0;0;0", "aff_country_unique": "China" }, { "title": "LEGO: Latent Execution-Guided Reasoning for Multi-Hop Question Answering on Knowledge Graphs", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8925", "id": "8925", "proceeding": "http://proceedings.mlr.press/v139/ren21a.html", "slides": "", "author_site": "Hongyu Ren, Hanjun Dai, Bo Dai, Xinyun Chen, Michihiro Yasunaga, Haitian Sun, Dale Schuurmans, Jure Leskovec, Denny Zhou", "author": "Hongyu Ren; Hanjun Dai; Bo Dai; Xinyun Chen; Michihiro Yasunaga; Haitian Sun; Dale Schuurmans; Jure Leskovec; Denny Zhou", "abstract": "Answering complex natural language questions on knowledge graphs (KGQA) is a challenging task. It requires reasoning with the input natural language questions as well as a massive, incomplete heterogeneous KG. Prior methods obtain an abstract structured query graph/tree from the input question and traverse the KG for answers following the query tree. However, they inherently cannot deal with missing links in the KG. Here we present LEGO, a Latent Execution-Guided reasOning framework to handle this challenge in KGQA. LEGO works in an iterative way, which alternates between (1) a Query Synthesizer, which synthesizes a reasoning action and grows the query tree step-by-step, and (2) a Latent Space Executor that executes the reasoning action in the latent embedding space to combat against the missing information in KG. To learn the synthesizer without step-wise supervision, we design a generic latent execution guided bottom-up search procedure to find good execution traces efficiently in the vast query space. 
Experimental results on several KGQA benchmarks demonstrate the effectiveness of our framework compared with previous state of the art.", "bibtex": "@InProceedings{pmlr-v139-ren21a,\n title = \t {LEGO: Latent Execution-Guided Reasoning for Multi-Hop Question Answering on Knowledge Graphs},\n author = {Ren, Hongyu and Dai, Hanjun and Dai, Bo and Chen, Xinyun and Yasunaga, Michihiro and Sun, Haitian and Schuurmans, Dale and Leskovec, Jure and Zhou, Denny},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8959--8970},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ren21a/ren21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ren21a.html},\n abstract = \t {Answering complex natural language questions on knowledge graphs (KGQA) is a challenging task. It requires reasoning with the input natural language questions as well as a massive, incomplete heterogeneous KG. Prior methods obtain an abstract structured query graph/tree from the input question and traverse the KG for answers following the query tree. However, they inherently cannot deal with missing links in the KG. Here we present LEGO, a Latent Execution-Guided reasOning framework to handle this challenge in KGQA. LEGO works in an iterative way, which alternates between (1) a Query Synthesizer, which synthesizes a reasoning action and grows the query tree step-by-step, and (2) a Latent Space Executor that executes the reasoning action in the latent embedding space to combat against the missing information in KG. To learn the synthesizer without step-wise supervision, we design a generic latent execution guided bottom-up search procedure to find good execution traces efficiently in the vast query space. Experimental results on several KGQA benchmarks demonstrate the effectiveness of our framework compared with previous state of the art.}\n}", "pdf": "http://proceedings.mlr.press/v139/ren21a/ren21a.pdf", "supp": "", "pdf_size": 0, "gs_citation": 103, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10628954854741072734&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": ";;;;;;;;", "aff_domain": ";;;;;;;;", "email": ";;;;;;;;", "github": "", "project": "", "author_num": 9, "oa": "https://proceedings.mlr.press/v139/ren21a.html" }, { "title": "LIME: Learning Inductive Bias for Primitives of Mathematical Reasoning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9435", "id": "9435", "proceeding": "http://proceedings.mlr.press/v139/wu21c.html", "slides": "", "author_site": "Yuhuai Wu, Markus Rabe, Wenda Li, Jimmy Ba, Roger Grosse, Christian Szegedy", "author": "Yuhuai Wu; Markus N Rabe; Wenda Li; Jimmy Ba; Roger B Grosse; Christian Szegedy", "abstract": "While designing inductive bias in neural architectures has been widely studied, we hypothesize that transformer networks are flexible enough to learn inductive bias from suitable generic tasks. Here, we replace architecture engineering by encoding inductive bias in the form of datasets. Inspired by Peirce\u2019s view that deduction, induction, and abduction are the primitives of reasoning, we design three synthetic tasks that are intended to require the model to have these three abilities. 
We specifically design these tasks to be synthetic and devoid of mathematical knowledge to ensure that only the fundamental reasoning biases can be learned from these tasks. This defines a new pre-training methodology called \"LIME\" (Learning Inductive bias for Mathematical rEasoning). Models trained with LIME significantly outperform vanilla transformers on four very different large mathematical reasoning benchmarks. Unlike dominating the computation cost as traditional pre-training approaches, LIME requires only a small fraction of the computation cost of the typical downstream task. The code for generating LIME tasks is available at https://github.com/tonywu95/LIME.", "bibtex": "@InProceedings{pmlr-v139-wu21c,\n title = \t {LIME: Learning Inductive Bias for Primitives of Mathematical Reasoning},\n author = {Wu, Yuhuai and Rabe, Markus N and Li, Wenda and Ba, Jimmy and Grosse, Roger B and Szegedy, Christian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11251--11262},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wu21c/wu21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/wu21c.html},\n abstract = \t {While designing inductive bias in neural architectures has been widely studied, we hypothesize that transformer networks are flexible enough to learn inductive bias from suitable generic tasks. Here, we replace architecture engineering by encoding inductive bias in the form of datasets. Inspired by Peirce\u2019s view that deduction, induction, and abduction are the primitives of reasoning, we design three synthetic tasks that are intended to require the model to have these three abilities. We specifically design these tasks to be synthetic and devoid of mathematical knowledge to ensure that only the fundamental reasoning biases can be learned from these tasks. This defines a new pre-training methodology called \"LIME\" (Learning Inductive bias for Mathematical rEasoning). Models trained with LIME significantly outperform vanilla transformers on four very different large mathematical reasoning benchmarks. Unlike dominating the computation cost as traditional pre-training approaches, LIME requires only a small fraction of the computation cost of the typical downstream task. 
The code for generating LIME tasks is available at https://github.com/tonywu95/LIME.}\n}", "pdf": "http://proceedings.mlr.press/v139/wu21c/wu21c.pdf", "supp": "", "pdf_size": 296155, "gs_citation": 65, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6631886312737976055&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "University of Toronto, Toronto, Canada+Vector Institute, Toronto, Canada; Google Research; University of Cambridge, Cambridge, UK; University of Toronto, Toronto, Canada+Vector Institute, Toronto, Canada; University of Toronto, Toronto, Canada+Vector Institute, Toronto, Canada; Google Research", "aff_domain": "cs.toronto.edu; ; ; ; ; ", "email": "cs.toronto.edu; ; ; ; ; ", "github": "https://github.com/tonywu95/LIME", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/wu21c.html", "aff_unique_index": "0+1;2;3;0+1;0+1;2", "aff_unique_norm": "University of Toronto;Vector Institute;Google;University of Cambridge", "aff_unique_dep": ";;Google Research;", "aff_unique_url": "https://www.utoronto.ca;https://vectorinstitute.ai;https://research.google;https://www.cam.ac.uk", "aff_unique_abbr": "U of T;Vector Institute;Google Research;Cambridge", "aff_campus_unique_index": "0+0;1;2;0+0;0+0;1", "aff_campus_unique": "Toronto;Mountain View;Cambridge", "aff_country_unique_index": "0+0;1;2;0+0;0+0;1", "aff_country_unique": "Canada;United States;United Kingdom" }, { "title": "LTL2Action: Generalizing LTL Instructions for Multi-Task RL", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9191", "id": "9191", "proceeding": "http://proceedings.mlr.press/v139/vaezipoor21a.html", "slides": "/media/icml-2021/Slides/9191_KcqXUSP.pdf", "author_site": "Pashootan Vaezipoor, Andrew C Li, Rodrigo A Toro Icarte, Sheila McIlraith", "author": "Pashootan Vaezipoor; Andrew C Li; Rodrigo A Toro Icarte; Sheila A. Mcilraith", "abstract": "We address the problem of teaching a deep reinforcement learning (RL) agent to follow instructions in multi-task environments. Instructions are expressed in a well-known formal language {\u2013} linear temporal logic (LTL) {\u2013} and can specify a diversity of complex, temporally extended behaviours, including conditionals and alternative realizations. Our proposed learning approach exploits the compositional syntax and the semantics of LTL, enabling our RL agent to learn task-conditioned policies that generalize to new instructions, not observed during training. To reduce the overhead of learning LTL semantics, we introduce an environment-agnostic LTL pretraining scheme which improves sample-efficiency in downstream environments. 
Experiments on discrete and continuous domains target combinatorial task sets of up to $\\sim10^{39}$ unique tasks and demonstrate the strength of our approach in learning to solve (unseen) tasks, given LTL instructions.", "bibtex": "@InProceedings{pmlr-v139-vaezipoor21a,\n title = \t {LTL2Action: Generalizing LTL Instructions for Multi-Task RL},\n author = {Vaezipoor, Pashootan and Li, Andrew C and Icarte, Rodrigo A Toro and Mcilraith, Sheila A.},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10497--10508},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/vaezipoor21a/vaezipoor21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/vaezipoor21a.html},\n abstract = \t {We address the problem of teaching a deep reinforcement learning (RL) agent to follow instructions in multi-task environments. Instructions are expressed in a well-known formal language {\u2013} linear temporal logic (LTL) {\u2013} and can specify a diversity of complex, temporally extended behaviours, including conditionals and alternative realizations. Our proposed learning approach exploits the compositional syntax and the semantics of LTL, enabling our RL agent to learn task-conditioned policies that generalize to new instructions, not observed during training. To reduce the overhead of learning LTL semantics, we introduce an environment-agnostic LTL pretraining scheme which improves sample-efficiency in downstream environments. Experiments on discrete and continuous domains target combinatorial task sets of up to $\\sim10^{39}$ unique tasks and demonstrate the strength of our approach in learning to solve (unseen) tasks, given LTL instructions.}\n}", "pdf": "http://proceedings.mlr.press/v139/vaezipoor21a/vaezipoor21a.pdf", "supp": "", "pdf_size": 1867859, "gs_citation": 94, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14511888964718858114&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Department of Computer Science, University of Toronto+Vector Institute for Artificial Intelligence+Schwartz Reisman Institute for Technology and Society; Department of Computer Science, University of Toronto+Vector Institute for Artificial Intelligence+Schwartz Reisman Institute for Technology and Society; Department of Computer Science, University of Toronto+Vector Institute for Artificial Intelligence; Department of Computer Science, University of Toronto+Vector Institute for Artificial Intelligence+Schwartz Reisman Institute for Technology and Society", "aff_domain": "cs.toronto.edu;cs.toronto.edu; ; ", "email": "cs.toronto.edu;cs.toronto.edu; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/vaezipoor21a.html", "aff_unique_index": "0+1+2;0+1+2;0+1;0+1+2", "aff_unique_norm": "University of Toronto;Vector Institute for Artificial Intelligence;Schwartz Reisman Institute for Technology and Society", "aff_unique_dep": "Department of Computer Science;;", "aff_unique_url": "https://www.utoronto.ca;https://vectorinstitute.ai/;https://www.schwartzreisman.ca", "aff_unique_abbr": "U of T;Vector Institute;", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Toronto;", "aff_country_unique_index": "0+0+0;0+0+0;0+0;0+0+0", "aff_country_unique": "Canada" }, { "title": "Label Distribution Learning Machine", "status": 
"Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10653", "id": "10653", "proceeding": "http://proceedings.mlr.press/v139/wang21h.html", "slides": "", "author_site": "Jing Wang, Xin Geng", "author": "Jing Wang; Xin Geng", "abstract": "Although Label Distribution Learning (LDL) has witnessed extensive classification applications, it faces the challenge of objective mismatch \u2013 the objective of LDL mismatches that of classification, which has seldom been noticed in existing studies. Our goal is to solve the objective mismatch and improve the classification performance of LDL. Specifically, we extend the margin theory to LDL and propose a new LDL method called \\textbf{L}abel \\textbf{D}istribution \\textbf{L}earning \\textbf{M}achine (LDLM). First, we define the label distribution margin and propose the \\textbf{S}upport \\textbf{V}ector \\textbf{R}egression \\textbf{M}achine (SVRM) to learn the optimal label. Second, we propose the adaptive margin loss to learn label description degrees. In theoretical analysis, we develop a generalization theory for the SVRM and analyze the generalization of LDLM. Experimental results validate the better classification performance of LDLM.", "bibtex": "@InProceedings{pmlr-v139-wang21h,\n title = \t {Label Distribution Learning Machine},\n author = {Wang, Jing and Geng, Xin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10749--10759},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21h/wang21h.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21h.html},\n abstract = \t {Although Label Distribution Learning (LDL) has witnessed extensive classification applications, it faces the challenge of objective mismatch \u2013 the objective of LDL mismatches that of classification, which has seldom been noticed in existing studies. Our goal is to solve the objective mismatch and improve the classification performance of LDL. Specifically, we extend the margin theory to LDL and propose a new LDL method called \\textbf{L}abel \\textbf{D}istribution \\textbf{L}earning \\textbf{M}achine (LDLM). First, we define the label distribution margin and propose the \\textbf{S}upport \\textbf{V}ector \\textbf{R}egression \\textbf{M}achine (SVRM) to learn the optimal label. Second, we propose the adaptive margin loss to learn label description degrees. In theoretical analysis, we develop a generalization theory for the SVRM and analyze the generalization of LDLM. 
Experimental results validate the better classification performance of LDLM.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21h/wang21h.pdf", "supp": "", "pdf_size": 4798561, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15438935826062670511&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 2, "aff": "School of Computer Science and Engineering, Southeast University, Nanjing, China+Key Laboratory of Computer Network and Information Integration (Southeast University), Ministry of Education; School of Computer Science and Engineering, Southeast University, Nanjing, China+Key Laboratory of Computer Network and Information Integration (Southeast University), Ministry of Education", "aff_domain": "seu.edu.cn;seu.edu.cn", "email": "seu.edu.cn;seu.edu.cn", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/wang21h.html", "aff_unique_index": "0+0;0+0", "aff_unique_norm": "Southeast University", "aff_unique_dep": "School of Computer Science and Engineering", "aff_unique_url": "https://www.seu.edu.cn/", "aff_unique_abbr": "SEU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Nanjing;", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "China" }, { "title": "Label Inference Attacks from Log-loss Scores", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9893", "id": "9893", "proceeding": "http://proceedings.mlr.press/v139/aggarwal21a.html", "slides": "/media/icml-2021/Slides/9893.pdf", "author_site": "Abhinav Aggarwal, Shiva Kasiviswanathan, Zekun Xu, Oluwaseyi Feyisetan, Nathanael Teissier", "author": "Abhinav Aggarwal; Shiva Kasiviswanathan; Zekun Xu; Oluwaseyi Feyisetan; Nathanael Teissier", "abstract": "Log-loss (also known as cross-entropy loss) metric is ubiquitously used across machine learning applications to assess the performance of classification algorithms. In this paper, we investigate the problem of inferring the labels of a dataset from single (or multiple) log-loss score(s), without any other access to the dataset. Surprisingly, we show that for any finite number of label classes, it is possible to accurately infer the labels of the dataset from the reported log-loss score of a single carefully constructed prediction vector if we allow arbitrary precision arithmetic. Additionally, we present label inference algorithms (attacks) that succeed even under addition of noise to the log-loss scores and under limited precision arithmetic. All our algorithms rely on ideas from number theory and combinatorics and require no model training. 
We run experimental simulations on some real datasets to demonstrate the ease of running these attacks in practice.", "bibtex": "@InProceedings{pmlr-v139-aggarwal21a,\n title = \t {Label Inference Attacks from Log-loss Scores},\n author = {Aggarwal, Abhinav and Kasiviswanathan, Shiva and Xu, Zekun and Feyisetan, Oluwaseyi and Teissier, Nathanael},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {120--129},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/aggarwal21a/aggarwal21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/aggarwal21a.html},\n abstract = \t {Log-loss (also known as cross-entropy loss) metric is ubiquitously used across machine learning applications to assess the performance of classification algorithms. In this paper, we investigate the problem of inferring the labels of a dataset from single (or multiple) log-loss score(s), without any other access to the dataset. Surprisingly, we show that for any finite number of label classes, it is possible to accurately infer the labels of the dataset from the reported log-loss score of a single carefully constructed prediction vector if we allow arbitrary precision arithmetic. Additionally, we present label inference algorithms (attacks) that succeed even under addition of noise to the log-loss scores and under limited precision arithmetic. All our algorithms rely on ideas from number theory and combinatorics and require no model training. We run experimental simulations on some real datasets to demonstrate the ease of running these attacks in practice.}\n}", "pdf": "http://proceedings.mlr.press/v139/aggarwal21a/aggarwal21a.pdf", "supp": "", "pdf_size": 1332501, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16257740500152934002&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Amazon; Amazon; Amazon; Amazon; Amazon", "aff_domain": "amazon.com;amazon.com; ; ; ", "email": "amazon.com;amazon.com; ; ; ", "github": "", "project": "https://www.kaggle.com/; https://www.kdd.org/kdd-cup", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/aggarwal21a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Amazon", "aff_unique_dep": "Amazon.com, Inc.", "aff_unique_url": "https://www.amazon.com", "aff_unique_abbr": "Amazon", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Label-Only Membership Inference Attacks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9415", "id": "9415", "proceeding": "http://proceedings.mlr.press/v139/choquette-choo21a.html", "slides": "", "author_site": "Christopher Choquette-Choo, Florian Tramer, Nicholas Carlini, Nicolas Papernot", "author": "Christopher A. Choquette-Choo; Florian Tramer; Nicholas Carlini; Nicolas Papernot", "abstract": "Membership inference is one of the simplest privacy threats faced by machine learning models that are trained on private sensitive data. In this attack, an adversary infers whether a particular point was used to train the model, or not, by observing the model\u2019s predictions. 
Whereas current attack methods all require access to the model\u2019s predicted confidence score, we introduce a label-only attack that instead evaluates the robustness of the model\u2019s predicted (hard) labels under perturbations of the input, to infer membership. Our label-only attack is not only as-effective as attacks requiring access to confidence scores, it also demonstrates that a class of defenses against membership inference, which we call \u201cconfidence masking\u201d because they obfuscate the confidence scores to thwart attacks, are insufficient to prevent the leakage of private information. Our experiments show that training with differential privacy or strong L2 regularization are the only current defenses that meaningfully decrease leakage of private information, even for points that are outliers of the training distribution.", "bibtex": "@InProceedings{pmlr-v139-choquette-choo21a,\n title = \t {Label-Only Membership Inference Attacks},\n author = {Choquette-Choo, Christopher A. and Tramer, Florian and Carlini, Nicholas and Papernot, Nicolas},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1964--1974},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/choquette-choo21a/choquette-choo21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/choquette-choo21a.html},\n abstract = \t {Membership inference is one of the simplest privacy threats faced by machine learning models that are trained on private sensitive data. In this attack, an adversary infers whether a particular point was used to train the model, or not, by observing the model\u2019s predictions. Whereas current attack methods all require access to the model\u2019s predicted confidence score, we introduce a label-only attack that instead evaluates the robustness of the model\u2019s predicted (hard) labels under perturbations of the input, to infer membership. Our label-only attack is not only as-effective as attacks requiring access to confidence scores, it also demonstrates that a class of defenses against membership inference, which we call \u201cconfidence masking\u201d because they obfuscate the confidence scores to thwart attacks, are insufficient to prevent the leakage of private information. 
Our experiments show that training with differential privacy or strong L2 regularization are the only current defenses that meaningfully decrease leakage of private information, even for points that are outliers of the training distribution.}\n}", "pdf": "http://proceedings.mlr.press/v139/choquette-choo21a/choquette-choo21a.pdf", "supp": "", "pdf_size": 5723983, "gs_citation": 620, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18421653793757811360&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 5, "aff": "University of Toronto and Vector Institute; Stanford University; Google; University of Toronto and Vector Institute", "aff_domain": "gmail.com; ; ; ", "email": "gmail.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/choquette-choo21a.html", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "University of Toronto;Stanford University;Google", "aff_unique_dep": ";;Google", "aff_unique_url": "https://www.utoronto.ca;https://www.stanford.edu;https://www.google.com", "aff_unique_abbr": "U of T;Stanford;Google", "aff_campus_unique_index": "0;1;2;0", "aff_campus_unique": "Toronto;Stanford;Mountain View", "aff_country_unique_index": "0;1;1;0", "aff_country_unique": "Canada;United States" }, { "title": "Large Scale Private Learning via Low-rank Reparametrization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8901", "id": "8901", "proceeding": "http://proceedings.mlr.press/v139/yu21f.html", "slides": "/media/icml-2021/Slides/8901.pdf", "author_site": "Da Yu, Huishuai Zhang, Wei Chen, Jian Yin, Tie-Yan Liu", "author": "Da Yu; Huishuai Zhang; Wei Chen; Jian Yin; Tie-Yan Liu", "abstract": "We propose a reparametrization scheme to address the challenges of applying differentially private SGD on large neural networks, which are 1) the huge memory cost of storing individual gradients, 2) the added noise suffering notorious dimensional dependence. Specifically, we reparametrize each weight matrix with two \\emph{gradient-carrier} matrices of small dimension and a \\emph{residual weight} matrix. We argue that such reparametrization keeps the forward/backward process unchanged while enabling us to compute the projected gradient without computing the gradient itself. To learn with differential privacy, we design \\emph{reparametrized gradient perturbation (RGP)} that perturbs the gradients on gradient-carrier matrices and reconstructs an update for the original weight from the noisy gradients. Importantly, we use historical updates to find the gradient-carrier matrices, whose optimality is rigorously justified under linear regression and empirically verified with deep learning tasks. RGP significantly reduces the memory cost and improves the utility. 
For example, we are the first able to apply differential privacy on the BERT model and achieve an average accuracy of $83.9\\%$ on four downstream tasks with $\\epsilon=8$, which is within $5\\%$ loss compared to the non-private baseline but enjoys much lower privacy leakage risk.", "bibtex": "@InProceedings{pmlr-v139-yu21f,\n title = \t {Large Scale Private Learning via Low-rank Reparametrization},\n author = {Yu, Da and Zhang, Huishuai and Chen, Wei and Yin, Jian and Liu, Tie-Yan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12208--12218},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yu21f/yu21f.pdf},\n url = \t {https://proceedings.mlr.press/v139/yu21f.html},\n abstract = \t {We propose a reparametrization scheme to address the challenges of applying differentially private SGD on large neural networks, which are 1) the huge memory cost of storing individual gradients, 2) the added noise suffering notorious dimensional dependence. Specifically, we reparametrize each weight matrix with two \\emph{gradient-carrier} matrices of small dimension and a \\emph{residual weight} matrix. We argue that such reparametrization keeps the forward/backward process unchanged while enabling us to compute the projected gradient without computing the gradient itself. To learn with differential privacy, we design \\emph{reparametrized gradient perturbation (RGP)} that perturbs the gradients on gradient-carrier matrices and reconstructs an update for the original weight from the noisy gradients. Importantly, we use historical updates to find the gradient-carrier matrices, whose optimality is rigorously justified under linear regression and empirically verified with deep learning tasks. RGP significantly reduces the memory cost and improves the utility. 
For example, we are the first able to apply differential privacy on the BERT model and achieve an average accuracy of $83.9\\%$ on four downstream tasks with $\\epsilon=8$, which is within $5\\%$ loss compared to the non-private baseline but enjoys much lower privacy leakage risk.}\n}", "pdf": "http://proceedings.mlr.press/v139/yu21f/yu21f.pdf", "supp": "", "pdf_size": 482310, "gs_citation": 118, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10646842759761842433&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "The School of Data and Computer Science & Guangdong Key Laboratory of Big Data Analysis and Processing, Sun Yat-sen University, Guangdong, China + Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia; The School of Data and Computer Science & Guangdong Key Laboratory of Big Data Analysis and Processing, Sun Yat-sen University, Guangdong, China + Microsoft Research Asia; Microsoft Research Asia", "aff_domain": "example.com;example.com;microsoft.com;mail.sysu.edu.cn;example.com", "email": "example.com;example.com;microsoft.com;mail.sysu.edu.cn;example.com", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/yu21f.html", "aff_unique_index": "0+1;1;1;0+1;1", "aff_unique_norm": "Sun Yat-sen University;Microsoft", "aff_unique_dep": "School of Data and Computer Science;Research", "aff_unique_url": "http://www.sysu.edu.cn;https://www.microsoft.com/en-us/research/group/asia", "aff_unique_abbr": "SYSU;MSR Asia", "aff_campus_unique_index": "0+1;1;1;0+1;1", "aff_campus_unique": "Guangdong;Asia", "aff_country_unique_index": "0+0;0;0;0+0;0", "aff_country_unique": "China" }, { "title": "Large-Margin Contrastive Learning with Distance Polarization Regularizer", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8679", "id": "8679", "proceeding": "http://proceedings.mlr.press/v139/chen21n.html", "slides": "", "author_site": "Shuo Chen, Gang Niu, Chen Gong, Jun Li, Jian Yang, Masashi Sugiyama", "author": "Shuo Chen; Gang Niu; Chen Gong; Jun Li; Jian Yang; Masashi Sugiyama", "abstract": "\\emph{Contrastive learning}\u00a0(CL) pretrains models in a pairwise manner, where given a data point, other data points are all regarded as dissimilar, including some that are \\emph{semantically} similar. The issue has been addressed by properly weighting similar and dissimilar pairs as in \\emph{positive-unlabeled learning}, so that the objective of CL is \\emph{unbiased} and CL is \\emph{consistent}. However, in this paper, we argue that this great solution is still not enough: its weighted objective \\emph{hides} the issue where the semantically similar pairs are still pushed away; as CL is pretraining, this phenomenon is not our desideratum and might affect downstream tasks. To this end, we propose \\emph{large-margin contrastive learning}\u00a0(LMCL) with \\emph{distance polarization regularizer}, motivated by the distribution characteristic of pairwise distances in \\emph{metric learning}. In LMCL, we can distinguish between \\emph{intra-cluster} and \\emph{inter-cluster} pairs, and then only push away inter-cluster pairs, which \\emph{solves} the above issue explicitly. 
Theoretically, we prove a tighter error bound for LMCL; empirically, the superiority of LMCL is demonstrated across multiple domains, \\emph{i.e.}, image classification, sentence representation, and reinforcement learning.", "bibtex": "@InProceedings{pmlr-v139-chen21n,\n title = \t {Large-Margin Contrastive Learning with Distance Polarization Regularizer},\n author = {Chen, Shuo and Niu, Gang and Gong, Chen and Li, Jun and Yang, Jian and Sugiyama, Masashi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1673--1683},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21n/chen21n.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21n.html},\n abstract = \t {\\emph{Contrastive learning}\u00a0(CL) pretrains models in a pairwise manner, where given a data point, other data points are all regarded as dissimilar, including some that are \\emph{semantically} similar. The issue has been addressed by properly weighting similar and dissimilar pairs as in \\emph{positive-unlabeled learning}, so that the objective of CL is \\emph{unbiased} and CL is \\emph{consistent}. However, in this paper, we argue that this great solution is still not enough: its weighted objective \\emph{hides} the issue where the semantically similar pairs are still pushed away; as CL is pretraining, this phenomenon is not our desideratum and might affect downstream tasks. To this end, we propose \\emph{large-margin contrastive learning}\u00a0(LMCL) with \\emph{distance polarization regularizer}, motivated by the distribution characteristic of pairwise distances in \\emph{metric learning}. In LMCL, we can distinguish between \\emph{intra-cluster} and \\emph{inter-cluster} pairs, and then only push away inter-cluster pairs, which \\emph{solves} the above issue explicitly. 
Theoretically, we prove a tighter error bound for LMCL; empirically, the superiority of LMCL is demonstrated across multiple domains, \\emph{i.e.}, image classification, sentence representation, and reinforcement learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21n/chen21n.pdf", "supp": "", "pdf_size": 933531, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7939483572476488318&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "RIKEN Center for Advanced Intelligence Project, Japan; PCA-Lab, School of Computer Science and Engineering, Nanjing University of Science and Technology, China; PCA-Lab, School of Computer Science and Engineering, Nanjing University of Science and Technology, China; PCA-Lab, School of Computer Science and Engineering, Nanjing University of Science and Technology, China; PCA-Lab, School of Computer Science and Engineering, Nanjing University of Science and Technology, China; Graduate School of Frontier Sciences, The University of Tokyo, Japan", "aff_domain": "riken.jp; ; ;njust.edu.cn; ; ", "email": "riken.jp; ; ;njust.edu.cn; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/chen21n.html", "aff_unique_index": "0;1;1;1;1;2", "aff_unique_norm": "RIKEN Center for Advanced Intelligence Project;Nanjing University of Science and Technology;University of Tokyo", "aff_unique_dep": "Center for Advanced Intelligence Project;School of Computer Science and Engineering;Graduate School of Frontier Sciences", "aff_unique_url": "https://www.riken.jp/en/c-aip/;;https://www.u-tokyo.ac.jp", "aff_unique_abbr": "RIKEN C-AIP;;UTokyo", "aff_campus_unique_index": "1", "aff_campus_unique": ";Tokyo", "aff_country_unique_index": "0;1;1;1;1;0", "aff_country_unique": "Japan;China" }, { "title": "Large-Scale Meta-Learning with Continual Trajectory Shifting", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8939", "id": "8939", "proceeding": "http://proceedings.mlr.press/v139/shin21a.html", "slides": "/media/icml-2021/Slides/8939_Iwu9Ej0.pdf", "author_site": "JaeWoong Shin, Hae Beom Lee, Boqing Gong, Sung Ju Hwang", "author": "Jaewoong Shin; Hae Beom Lee; Boqing Gong; Sung Ju Hwang", "abstract": "Meta-learning of shared initialization parameters has shown to be highly effective in solving few-shot learning tasks. However, extending the framework to many-shot scenarios, which may further enhance its practicality, has been relatively overlooked due to the technical difficulties of meta-learning over long chains of inner-gradient steps. In this paper, we first show that allowing the meta-learners to take a larger number of inner gradient steps better captures the structure of heterogeneous and large-scale task distributions, thus results in obtaining better initialization points. Further, in order to increase the frequency of meta-updates even with the excessively long inner-optimization trajectories, we propose to estimate the required shift of the task-specific parameters with respect to the change of the initialization parameters. By doing so, we can arbitrarily increase the frequency of meta-updates and thus greatly improve the meta-level convergence as well as the quality of the learned initializations. 
We validate our method on a heterogeneous set of large-scale tasks, and show that the algorithm largely outperforms the previous first-order meta-learning methods in terms of both generalization performance and convergence, as well as multi-task learning and fine-tuning baselines.", "bibtex": "@InProceedings{pmlr-v139-shin21a,\n title = \t {Large-Scale Meta-Learning with Continual Trajectory Shifting},\n author = {Shin, Jaewoong and Lee, Hae Beom and Gong, Boqing and Hwang, Sung Ju},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9603--9613},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/shin21a/shin21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/shin21a.html},\n abstract = \t {Meta-learning of shared initialization parameters has shown to be highly effective in solving few-shot learning tasks. However, extending the framework to many-shot scenarios, which may further enhance its practicality, has been relatively overlooked due to the technical difficulties of meta-learning over long chains of inner-gradient steps. In this paper, we first show that allowing the meta-learners to take a larger number of inner gradient steps better captures the structure of heterogeneous and large-scale task distributions, thus results in obtaining better initialization points. Further, in order to increase the frequency of meta-updates even with the excessively long inner-optimization trajectories, we propose to estimate the required shift of the task-specific parameters with respect to the change of the initialization parameters. By doing so, we can arbitrarily increase the frequency of meta-updates and thus greatly improve the meta-level convergence as well as the quality of the learned initializations. 
We validate our method on a heterogeneous set of large-scale tasks, and show that the algorithm largely outperforms the previous first-order meta-learning methods in terms of both generalization performance and convergence, as well as multi-task learning and fine-tuning baselines.}\n}", "pdf": "http://proceedings.mlr.press/v139/shin21a/shin21a.pdf", "supp": "", "pdf_size": 9241070, "gs_citation": 19, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1485442812434478690&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Graduate School of AI, KAIST, South Korea; Graduate School of AI, KAIST, South Korea; Google, LA; AITRICS, South Korea", "aff_domain": "kaist.ac.kr; ; ;kaist.ac.kr", "email": "kaist.ac.kr; ; ;kaist.ac.kr", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/shin21a.html", "aff_unique_index": "0;0;1;2", "aff_unique_norm": "KAIST;Google;AITRICS", "aff_unique_dep": "Graduate School of AI;Google;", "aff_unique_url": "https://www.kaist.edu;https://www.google.com;", "aff_unique_abbr": "KAIST;Google;", "aff_campus_unique_index": "1", "aff_campus_unique": ";Los Angeles", "aff_country_unique_index": "0;0;1;0", "aff_country_unique": "South Korea;United States" }, { "title": "Large-Scale Multi-Agent Deep FBSDEs", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9007", "id": "9007", "proceeding": "http://proceedings.mlr.press/v139/chen21t.html", "slides": "", "author_site": "Tianrong Chen, Ziyi Wang, Ioannis Exarchos, Evangelos Theodorou", "author": "Tianrong Chen; Ziyi O Wang; Ioannis Exarchos; Evangelos Theodorou", "abstract": "In this paper we present a scalable deep learning framework for finding Markovian Nash Equilibria in multi-agent stochastic games using fictitious play. The motivation is inspired by theoretical analysis of Forward Backward Stochastic Differential Equations and their implementation in a deep learning setting, which is the source of our algorithm\u2019s sample efficiency improvement. By taking advantage of the permutation-invariant property of agents in symmetric games, the scalability and performance is further enhanced significantly. We showcase superior performance of our framework over the state-of-the-art deep fictitious play algorithm on an inter-bank lending/borrowing problem in terms of multiple metrics. More importantly, our approach scales up to 3000 agents in simulation, a scale which, to the best of our knowledge, represents a new state-of-the-art. We also demonstrate the applicability of our framework in robotics on a belief space autonomous racing problem.", "bibtex": "@InProceedings{pmlr-v139-chen21t,\n title = \t {Large-Scale Multi-Agent Deep FBSDEs},\n author = {Chen, Tianrong and Wang, Ziyi O and Exarchos, Ioannis and Theodorou, Evangelos},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1740--1748},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21t/chen21t.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21t.html},\n abstract = \t {In this paper we present a scalable deep learning framework for finding Markovian Nash Equilibria in multi-agent stochastic games using fictitious play. 
The motivation is inspired by theoretical analysis of Forward Backward Stochastic Differential Equations and their implementation in a deep learning setting, which is the source of our algorithm\u2019s sample efficiency improvement. By taking advantage of the permutation-invariant property of agents in symmetric games, the scalability and performance is further enhanced significantly. We showcase superior performance of our framework over the state-of-the-art deep fictitious play algorithm on an inter-bank lending/borrowing problem in terms of multiple metrics. More importantly, our approach scales up to 3000 agents in simulation, a scale which, to the best of our knowledge, represents a new state-of-the-art. We also demonstrate the applicability of our framework in robotics on a belief space autonomous racing problem.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21t/chen21t.pdf", "supp": "", "pdf_size": 4914666, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5541727749142379893&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "School of Electrical and Computer Engineering, Georgia Institute of Technology, Atlanta, USA+Center for Machine Learning, Georgia Institute of Technology, Atlanta, USA+School of Aerospace Engineering, Georgia Institute of Technology, Atlanta, USA; Center for Machine Learning, Georgia Institute of Technology, Atlanta, USA+School of Aerospace Engineering, Georgia Institute of Technology, Atlanta, USA; Department of Computer Science, Stanford University, Stanford, USA; School of Aerospace Engineering, Georgia Institute of Technology, Atlanta, USA", "aff_domain": "gatech.edu; ; ; ", "email": "gatech.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/chen21t.html", "aff_unique_index": "0+0+0;0+0;1;0", "aff_unique_norm": "Georgia Institute of Technology;Stanford University", "aff_unique_dep": "School of Electrical and Computer Engineering;Department of Computer Science", "aff_unique_url": "https://www.gatech.edu;https://www.stanford.edu", "aff_unique_abbr": "Georgia Tech;Stanford", "aff_campus_unique_index": "0+0+0;0+0;1;0", "aff_campus_unique": "Atlanta;Stanford", "aff_country_unique_index": "0+0+0;0+0;0;0", "aff_country_unique": "United States" }, { "title": "Latent Programmer: Discrete Latent Codes for Program Synthesis", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9563", "id": "9563", "proceeding": "http://proceedings.mlr.press/v139/hong21a.html", "slides": "/media/icml-2021/Slides/9563.pdf", "author_site": "Joey Hong, David Dohan, Rishabh Singh, Charles Sutton, Manzil Zaheer", "author": "Joey Hong; David Dohan; Rishabh Singh; Charles Sutton; Manzil Zaheer", "abstract": "A key problem in program synthesis is searching over the large space of possible programs. Human programmers might decide the high-level structure of the desired program before thinking about the details; motivated by this intuition, we consider two-level search for program synthesis, in which the synthesizer first generates a plan, a sequence of symbols that describes the desired program at a high level, before generating the program. We propose to learn representations of programs that can act as plans to organize such a two-level search. Discrete latent codes are appealing for this purpose, and can be learned by applying recent work on discrete autoencoders. 
Based on these insights, we introduce the Latent Programmer (LP), a program synthesis method that first predicts a discrete latent code from input/output examples, and then generates the program in the target language. We evaluate the LP on two domains, demonstrating that it yields an improvement in accuracy, especially on longer programs for which search is most difficult.", "bibtex": "@InProceedings{pmlr-v139-hong21a,\n title = \t {Latent Programmer: Discrete Latent Codes for Program Synthesis},\n author = {Hong, Joey and Dohan, David and Singh, Rishabh and Sutton, Charles and Zaheer, Manzil},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4308--4318},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hong21a/hong21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hong21a.html},\n abstract = \t {A key problem in program synthesis is searching over the large space of possible programs. Human programmers might decide the high-level structure of the desired program before thinking about the details; motivated by this intuition, we consider two-level search for program synthesis, in which the synthesizer first generates a plan, a sequence of symbols that describes the desired program at a high level, before generating the program. We propose to learn representations of programs that can act as plans to organize such a two-level search. Discrete latent codes are appealing for this purpose, and can be learned by applying recent work on discrete autoencoders. Based on these insights, we introduce the Latent Programmer (LP), a program synthesis method that first predicts a discrete latent code from input/output examples, and then generates the program in the target language. 
We evaluate the LP on two domains, demonstrating that it yields an improvement in accuracy, especially on longer programs for which search is most difficult.}\n}", "pdf": "http://proceedings.mlr.press/v139/hong21a/hong21a.pdf", "supp": "", "pdf_size": 365538, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9789877360194738968&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Google Research, Mountain View, CA, USA; Google Research, Mountain View, CA, USA; Google Research, Mountain View, CA, USA; Google Research, Mountain View, CA, USA; Google Research, Mountain View, CA, USA", "aff_domain": "google.com; ; ; ; ", "email": "google.com; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/hong21a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Latent Space Energy-Based Model of Symbol-Vector Coupling for Text Generation and Classification", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8911", "id": "8911", "proceeding": "http://proceedings.mlr.press/v139/pang21a.html", "slides": "", "author_site": "Bo Pang, Ying Nian Wu", "author": "Bo Pang; Ying Nian Wu", "abstract": "We propose a latent space energy-based prior model for text generation and classification. The model stands on a generator network that generates the text sequence based on a continuous latent vector. The energy term of the prior model couples a continuous latent vector and a symbolic one-hot vector, so that discrete category can be inferred from the observed example based on the continuous latent vector. Such a latent space coupling naturally enables incorporation of information bottleneck regularization to encourage the continuous latent vector to extract information from the observed example that is informative of the underlying category. In our learning method, the symbol-vector coupling, the generator network and the inference network are learned jointly. Our model can be learned in an unsupervised setting where no category labels are provided. It can also be learned in semi-supervised setting where category labels are provided for a subset of training examples. Our experiments demonstrate that the proposed model learns well-structured and meaningful latent space, which (1) guides the generator to generate text with high quality, diversity, and interpretability, and (2) effectively classifies text.", "bibtex": "@InProceedings{pmlr-v139-pang21a,\n title = \t {Latent Space Energy-Based Model of Symbol-Vector Coupling for Text Generation and Classification},\n author = {Pang, Bo and Wu, Ying Nian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8359--8370},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/pang21a/pang21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/pang21a.html},\n abstract = \t {We propose a latent space energy-based prior model for text generation and classification. 
The model stands on a generator network that generates the text sequence based on a continuous latent vector. The energy term of the prior model couples a continuous latent vector and a symbolic one-hot vector, so that discrete category can be inferred from the observed example based on the continuous latent vector. Such a latent space coupling naturally enables incorporation of information bottleneck regularization to encourage the continuous latent vector to extract information from the observed example that is informative of the underlying category. In our learning method, the symbol-vector coupling, the generator network and the inference network are learned jointly. Our model can be learned in an unsupervised setting where no category labels are provided. It can also be learned in semi-supervised setting where category labels are provided for a subset of training examples. Our experiments demonstrate that the proposed model learns well-structured and meaningful latent space, which (1) guides the generator to generate text with high quality, diversity, and interpretability, and (2) effectively classifies text.}\n}", "pdf": "http://proceedings.mlr.press/v139/pang21a/pang21a.pdf", "supp": "", "pdf_size": 668271, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18132333076288060504&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of Statistics, University of California, Los Angeles, California, USA; Department of Statistics, University of California, Los Angeles, California, USA", "aff_domain": "ucla.edu; ", "email": "ucla.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/pang21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Los Angeles", "aff_unique_dep": "Department of Statistics", "aff_unique_url": "https://www.ucla.edu", "aff_unique_abbr": "UCLA", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Los Angeles", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Learn-to-Share: A Hardware-friendly Transfer Learning Framework Exploiting Computation and Parameter Sharing", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9845", "id": "9845", "proceeding": "http://proceedings.mlr.press/v139/fu21a.html", "slides": "", "author_site": "Cheng Fu, Hanxian Huang, Xinyun Chen, Yuandong Tian, Jishen Zhao", "author": "Cheng Fu; Hanxian Huang; Xinyun Chen; Yuandong Tian; Jishen Zhao", "abstract": "Task-specific fine-tuning on pre-trained transformers has achieved performance breakthroughs in multiple NLP tasks. Yet, as both computation and parameter size grows linearly with the number of sub-tasks, it is increasingly difficult to adopt such methods to the real world due to unrealistic memory and computation overhead on computing devices. Previous works on fine-tuning focus on reducing the growing parameter size to save storage cost by parameter sharing. However, compared to storage, the constraint of computation is a more critical issue with the fine-tuning models in modern computing environments. In this work, we propose LeTS, a framework that leverages both computation and parameter sharing across multiple tasks. Compared to traditional fine-tuning, LeTS proposes a novel neural architecture that contains a fixed pre-trained transformer model, plus learnable additive components for sub-tasks. 
The learnable components reuse the intermediate activations in the fixed pre-trained model, decoupling computation dependency. Differentiable neural architecture search is used to determine a task-specific computation sharing scheme, and a novel early stage pruning is applied to additive components for sparsity to achieve parameter sharing. Extensive experiments show that with 1.4% of extra parameters per task, LeTS reduces the computation by 49.5% on GLUE benchmarks with only 0.2% accuracy loss compared to full fine-tuning.", "bibtex": "@InProceedings{pmlr-v139-fu21a,\n title = \t {Learn-to-Share: A Hardware-friendly Transfer Learning Framework Exploiting Computation and Parameter Sharing},\n author = {Fu, Cheng and Huang, Hanxian and Chen, Xinyun and Tian, Yuandong and Zhao, Jishen},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3469--3479},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fu21a/fu21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/fu21a.html},\n abstract = \t {Task-specific fine-tuning on pre-trained transformers has achieved performance breakthroughs in multiple NLP tasks. Yet, as both computation and parameter size grows linearly with the number of sub-tasks, it is increasingly difficult to adopt such methods to the real world due to unrealistic memory and computation overhead on computing devices. Previous works on fine-tuning focus on reducing the growing parameter size to save storage cost by parameter sharing. However, compared to storage, the constraint of computation is a more critical issue with the fine-tuning models in modern computing environments. In this work, we propose LeTS, a framework that leverages both computation and parameter sharing across multiple tasks. Compared to traditional fine-tuning, LeTS proposes a novel neural architecture that contains a fixed pre-trained transformer model, plus learnable additive components for sub-tasks. The learnable components reuse the intermediate activations in the fixed pre-trained model, decoupling computation dependency. Differentiable neural architecture search is used to determine a task-specific computation sharing scheme, and a novel early stage pruning is applied to additive components for sparsity to achieve parameter sharing. 
Extensive experiments show that with 1.4% of extra parameters per task, LeTS reduces the computation by 49.5% on GLUE benchmarks with only 0.2% accuracy loss compared to full fine-tuning.}\n}", "pdf": "http://proceedings.mlr.press/v139/fu21a/fu21a.pdf", "supp": "", "pdf_size": 1519299, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=720455953403306617&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "University of California, San Diego; University of California, San Diego; University of California, Berkeley; Facebook AI Research; University of California, San Diego", "aff_domain": "eng.ucsd.edu; ; ; ; ", "email": "eng.ucsd.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/fu21a.html", "aff_unique_index": "0;0;1;2;0", "aff_unique_norm": "University of California, San Diego;University of California, Berkeley;Meta", "aff_unique_dep": ";;Facebook AI Research", "aff_unique_url": "https://www.ucsd.edu;https://www.berkeley.edu;https://research.facebook.com", "aff_unique_abbr": "UCSD;UC Berkeley;FAIR", "aff_campus_unique_index": "0;0;1;0", "aff_campus_unique": "San Diego;Berkeley;", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Learn2Hop: Learned Optimization on Rough Landscapes", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9417", "id": "9417", "proceeding": "http://proceedings.mlr.press/v139/merchant21a.html", "slides": "", "author_site": "Amil Merchant, Luke Metz, Samuel Schoenholz, Ekin Dogus Cubuk", "author": "Amil Merchant; Luke Metz; Samuel S Schoenholz; Ekin D Cubuk", "abstract": "Optimization of non-convex loss surfaces containing many local minima remains a critical problem in a variety of domains, including operations research, informatics, and material design. Yet, current techniques either require extremely high iteration counts or a large number of random restarts for good performance. In this work, we propose adapting recent developments in meta-learning to these many-minima problems by learning the optimization algorithm for various loss landscapes. We focus on problems from atomic structural optimization\u2014finding low energy configurations of many-atom systems\u2014including widely studied models such as bimetallic clusters and disordered silicon. We find that our optimizer learns a hopping behavior which enables efficient exploration and improves the rate of low energy minima discovery. Finally, our learned optimizers show promising generalization with efficiency gains on never before seen tasks (e.g. new elements or compositions). 
Code is available at https://learn2hop.page.link/github.", "bibtex": "@InProceedings{pmlr-v139-merchant21a,\n title = \t {Learn2Hop: Learned Optimization on Rough Landscapes},\n author = {Merchant, Amil and Metz, Luke and Schoenholz, Samuel S and Cubuk, Ekin D},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7643--7653},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/merchant21a/merchant21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/merchant21a.html},\n abstract = \t {Optimization of non-convex loss surfaces containing many local minima remains a critical problem in a variety of domains, including operations research, informatics, and material design. Yet, current techniques either require extremely high iteration counts or a large number of random restarts for good performance. In this work, we propose adapting recent developments in meta-learning to these many-minima problems by learning the optimization algorithm for various loss landscapes. We focus on problems from atomic structural optimization\u2014finding low energy configurations of many-atom systems\u2014including widely studied models such as bimetallic clusters and disordered silicon. We find that our optimizer learns a hopping behavior which enables efficient exploration and improves the rate of low energy minima discovery. Finally, our learned optimizers show promising generalization with efficiency gains on never before seen tasks (e.g. new elements or compositions). Code is available at https://learn2hop.page.link/github.}\n}", "pdf": "http://proceedings.mlr.press/v139/merchant21a/merchant21a.pdf", "supp": "", "pdf_size": 1206946, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5582210419946042265&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Google Research, Mountain View, California, USA + Google AI Residency Program; Google Research, Mountain View, California, USA; Google Research, Mountain View, California, USA; Google Research, Mountain View, California, USA", "aff_domain": "google.com; ; ;google.com", "email": "google.com; ; ;google.com", "github": "https://learn2hop.page.link/github", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/merchant21a.html", "aff_unique_index": "0+0;0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google", "aff_campus_unique_index": "0+0;0;0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0+0;0;0;0", "aff_country_unique": "United States" }, { "title": "Learner-Private Convex Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9455", "id": "9455", "proceeding": "http://proceedings.mlr.press/v139/xu21i.html", "slides": "", "author_site": "Jiaming Xu, Kuang Xu, Dana Yang", "author": "Jiaming Xu; Kuang Xu; Dana Yang", "abstract": "Convex optimization with feedback is a framework where a learner relies on iterative queries and feedback to arrive at the minimizer of a convex function. The paradigm has gained significant popularity recently thanks to its scalability in large-scale optimization and machine learning. 
The repeated interactions, however, expose the learner to privacy risks from eavesdropping adversaries that observe the submitted queries. In this paper, we study how to optimally obfuscate the learner\u2019s queries in convex optimization with first-order feedback, so that their learned optimal value is provably difficult to estimate for the eavesdropping adversary. We consider two formulations of learner privacy: a Bayesian formulation in which the convex function is drawn randomly, and a minimax formulation in which the function is fixed and the adversary\u2019s probability of error is measured with respect to a minimax criterion. We show that, if the learner wants to ensure the probability of the adversary estimating accurately be kept below 1/L, then the overhead in query complexity is additive in L in the minimax formulation, but multiplicative in L in the Bayesian formulation. Compared to existing learner-private sequential learning models with binary feedback, our results apply to the significantly richer family of general convex functions with full-gradient feedback. Our proofs are largely enabled by tools from the theory of Dirichlet processes, as well as more sophisticated lines of analysis aimed at measuring the amount of information leakage under a full-gradient oracle.", "bibtex": "@InProceedings{pmlr-v139-xu21i,\n title = \t {Learner-Private Convex Optimization},\n author = {Xu, Jiaming and Xu, Kuang and Yang, Dana},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11570--11580},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xu21i/xu21i.pdf},\n url = \t {https://proceedings.mlr.press/v139/xu21i.html},\n abstract = \t {Convex optimization with feedback is a framework where a learner relies on iterative queries and feedback to arrive at the minimizer of a convex function. The paradigm has gained significant popularity recently thanks to its scalability in large-scale optimization and machine learning. The repeated interactions, however, expose the learner to privacy risks from eavesdropping adversaries that observe the submitted queries. In this paper, we study how to optimally obfuscate the learner\u2019s queries in convex optimization with first-order feedback, so that their learned optimal value is provably difficult to estimate for the eavesdropping adversary. We consider two formulations of learner privacy: a Bayesian formulation in which the convex function is drawn randomly, and a minimax formulation in which the function is fixed and the adversary\u2019s probability of error is measured with respect to a minimax criterion. We show that, if the learner wants to ensure the probability of the adversary estimating accurately be kept below 1/L, then the overhead in query complexity is additive in L in the minimax formulation, but multiplicative in L in the Bayesian formulation. Compared to existing learner-private sequential learning models with binary feedback, our results apply to the significantly richer family of general convex functions with full-gradient feedback. 
Our proofs are largely enabled by tools from the theory of Dirichlet processes, as well as more sophisticated lines of analysis aimed at measuring the amount of information leakage under a full-gradient oracle.}\n}", "pdf": "http://proceedings.mlr.press/v139/xu21i/xu21i.pdf", "supp": "", "pdf_size": 1814501, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9968817912274916443&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "The Fuqua School of Business, Duke University, Durham NC, USA; Stanford Graduate School of Business, Stanford University, Stanford CA, USA; The Fuqua School of Business, Duke University, Durham NC, USA", "aff_domain": "duke.edu;stanford.edu;duke.edu", "email": "duke.edu;stanford.edu;duke.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/xu21i.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Duke University;Stanford University", "aff_unique_dep": "The Fuqua School of Business;Graduate School of Business", "aff_unique_url": "https://www.duke.edu;https://www.stanford.edu", "aff_unique_abbr": "Duke;Stanford", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Durham;Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Learning Binary Decision Trees by Argmin Differentiation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8771", "id": "8771", "proceeding": "http://proceedings.mlr.press/v139/zantedeschi21a.html", "slides": "", "author_site": "Valentina Zantedeschi, Matt J. Kusner, Vlad Niculae", "author": "Valentina Zantedeschi; Matt Kusner; Vlad Niculae", "abstract": "We address the problem of learning binary decision trees that partition data for some downstream task. We propose to learn discrete parameters (i.e., for tree traversals and node pruning) and continuous parameters (i.e., for tree split functions and prediction functions) simultaneously using argmin differentiation. We do so by sparsely relaxing a mixed-integer program for the discrete parameters, to allow gradients to pass through the program to continuous parameters. We derive customized algorithms to efficiently compute the forward and backward passes. This means that our tree learning procedure can be used as an (implicit) layer in arbitrary deep networks, and can be optimized with arbitrary loss functions. We demonstrate that our approach produces binary trees that are competitive with existing single tree and ensemble approaches, in both supervised and unsupervised settings. Further, apart from greedy approaches (which do not have competitive accuracies), our method is faster to train than all other tree-learning baselines we compare with.", "bibtex": "@InProceedings{pmlr-v139-zantedeschi21a,\n title = \t {Learning Binary Decision Trees by Argmin Differentiation},\n author = {Zantedeschi, Valentina and Kusner, Matt and Niculae, Vlad},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12298--12309},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zantedeschi21a/zantedeschi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zantedeschi21a.html},\n abstract = \t {We address the problem of learning binary decision trees that partition data for some downstream task. 
We propose to learn discrete parameters (i.e., for tree traversals and node pruning) and continuous parameters (i.e., for tree split functions and prediction functions) simultaneously using argmin differentiation. We do so by sparsely relaxing a mixed-integer program for the discrete parameters, to allow gradients to pass through the program to continuous parameters. We derive customized algorithms to efficiently compute the forward and backward passes. This means that our tree learning procedure can be used as an (implicit) layer in arbitrary deep networks, and can be optimized with arbitrary loss functions. We demonstrate that our approach produces binary trees that are competitive with existing single tree and ensemble approaches, in both supervised and unsupervised settings. Further, apart from greedy approaches (which do not have competitive accuracies), our method is faster to train than all other tree-learning baselines we compare with.}\n}", "pdf": "http://proceedings.mlr.press/v139/zantedeschi21a/zantedeschi21a.pdf", "supp": "", "pdf_size": 909720, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8235159658077202682&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Inria, Lille - Nord Europe research centre+University College London, Centre for Artificial Intelligence; University College London, Centre for Artificial Intelligence; Informatics Institute, University of Amsterdam", "aff_domain": "gmail.com;ucl.ac.uk;uva.nl", "email": "gmail.com;ucl.ac.uk;uva.nl", "github": "https://github.com/vzantedeschi/LatentTrees", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zantedeschi21a.html", "aff_unique_index": "0+1;1;2", "aff_unique_norm": "INRIA;University College London;University of Amsterdam", "aff_unique_dep": ";Centre for Artificial Intelligence;Informatics Institute", "aff_unique_url": "https://www.inria.fr;https://www.ucl.ac.uk;https://www.uva.nl", "aff_unique_abbr": "Inria;UCL;UvA", "aff_campus_unique_index": "0", "aff_campus_unique": "Lille;", "aff_country_unique_index": "0+1;1;2", "aff_country_unique": "France;United Kingdom;Netherlands" }, { "title": "Learning Bounds for Open-Set Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9075", "id": "9075", "proceeding": "http://proceedings.mlr.press/v139/fang21c.html", "slides": "/media/icml-2021/Slides/9075.pdf", "author_site": "Zhen Fang, Jie Lu, Anjin Liu, Feng Liu, Guangquan Zhang", "author": "Zhen Fang; Jie Lu; Anjin Liu; Feng Liu; Guangquan Zhang", "abstract": "Traditional supervised learning aims to train a classifier in the closed-set world, where training and test samples share the same label space. In this paper, we target a more challenging and realistic setting: open-set learning (OSL), where there exist test samples from the classes that are unseen during training. Although researchers have designed many methods from the algorithmic perspectives, there are few methods that provide generalization guarantees on their ability to achieve consistent performance on different training samples drawn from the same distribution. Motivated by the transfer learning and probably approximately correct (PAC) theory, we make a bold attempt to study OSL by proving its generalization error: given training samples with size n, the estimation error will get close to order $O_p(1/\sqrt{n})$. 
This is the first study to provide a generalization bound for OSL, which we do by theoretically investigating the risk of the target classifier on unknown classes. According to our theory, a novel algorithm, called auxiliary open-set risk (AOSR) is proposed to address the OSL problem. Experiments verify the efficacy of AOSR. The code is available at github.com/AnjinLiu/Openset_Learning_AOSR.", "bibtex": "@InProceedings{pmlr-v139-fang21c,\n title = \t {Learning Bounds for Open-Set Learning},\n author = {Fang, Zhen and Lu, Jie and Liu, Anjin and Liu, Feng and Zhang, Guangquan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3122--3132},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fang21c/fang21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/fang21c.html},\n abstract = \t {Traditional supervised learning aims to train a classifier in the closed-set world, where training and test samples share the same label space. In this paper, we target a more challenging and realistic setting: open-set learning (OSL), where there exist test samples from the classes that are unseen during training. Although researchers have designed many methods from the algorithmic perspectives, there are few methods that provide generalization guarantees on their ability to achieve consistent performance on different training samples drawn from the same distribution. Motivated by the transfer learning and probably approximately correct (PAC) theory, we make a bold attempt to study OSL by proving its generalization error: given training samples with size n, the estimation error will get close to order $O_p(1/\\sqrt{n})$. This is the first study to provide a generalization bound for OSL, which we do by theoretically investigating the risk of the target classifier on unknown classes. According to our theory, a novel algorithm, called auxiliary open-set risk (AOSR) is proposed to address the OSL problem. Experiments verify the efficacy of AOSR. 
The code is available at github.com/AnjinLiu/Openset_Learning_AOSR.}\n}", "pdf": "http://proceedings.mlr.press/v139/fang21c/fang21c.pdf", "supp": "", "pdf_size": 1093565, "gs_citation": 75, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5726822076204238537&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "AAII, University of Technology Sydney; AAII, University of Technology Sydney; AAII, University of Technology Sydney; AAII, University of Technology Sydney; AAII, University of Technology Sydney", "aff_domain": "student.uts.edu.au;uts.edu.au;uts.edu.au; ; ", "email": "student.uts.edu.au;uts.edu.au;uts.edu.au; ; ", "github": "github.com/Anjin-Liu/Openset_Learning_AOSR", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/fang21c.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "University of Technology Sydney", "aff_unique_dep": "AAII", "aff_unique_url": "https://www.uts.edu.au", "aff_unique_abbr": "UTS", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "Australia" }, { "title": "Learning Curves for Analysis of Deep Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8899", "id": "8899", "proceeding": "http://proceedings.mlr.press/v139/hoiem21a.html", "slides": "/media/icml-2021/Slides/8899.pdf", "author_site": "Derek Hoiem, Tanmay Gupta, Zhizhong Li, Michal Shlapentokh-Rothman", "author": "Derek Hoiem; Tanmay Gupta; Zhizhong Li; Michal Shlapentokh-Rothman", "abstract": "Learning curves model a classifier\u2019s test error as a function of the number of training samples. Prior works show that learning curves can be used to select model parameters and extrapolate performance. We investigate how to use learning curves to evaluate design choices, such as pretraining, architecture, and data augmentation. We propose a method to robustly estimate learning curves, abstract their parameters into error and data-reliance, and evaluate the effectiveness of different parameterizations. Our experiments exemplify use of learning curves for analysis and yield several interesting observations.", "bibtex": "@InProceedings{pmlr-v139-hoiem21a,\n title = \t {Learning Curves for Analysis of Deep Networks},\n author = {Hoiem, Derek and Gupta, Tanmay and Li, Zhizhong and Shlapentokh-Rothman, Michal},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4287--4296},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hoiem21a/hoiem21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hoiem21a.html},\n abstract = \t {Learning curves model a classifier\u2019s test error as a function of the number of training samples. Prior works show that learning curves can be used to select model parameters and extrapolate performance. We investigate how to use learning curves to evaluate design choices, such as pretraining, architecture, and data augmentation. We propose a method to robustly estimate learning curves, abstract their parameters into error and data-reliance, and evaluate the effectiveness of different parameterizations. 
Our experiments exemplify use of learning curves for analysis and yield several interesting observations.}\n}", "pdf": "http://proceedings.mlr.press/v139/hoiem21a/hoiem21a.pdf", "supp": "", "pdf_size": 1128622, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16115346742858062645&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "University of Illinois at Urbana-Champaign; PRIOR @ Allen Institute for AI; University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign", "aff_domain": "illinois.edu; ; ; ", "email": "illinois.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/hoiem21a.html", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "University of Illinois Urbana-Champaign;Allen Institute for AI", "aff_unique_dep": ";PRIOR", "aff_unique_url": "https://illinois.edu;https://allenai.org", "aff_unique_abbr": "UIUC;AI2", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Urbana-Champaign;", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Learning Deep Neural Networks under Agnostic Corrupted Supervision", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10557", "id": "10557", "proceeding": "http://proceedings.mlr.press/v139/liu21v.html", "slides": "", "author_site": "Boyang Liu, Mengying Sun, Ding Wang, Pang-Ning Tan, Jiayu Zhou", "author": "Boyang Liu; Mengying Sun; Ding Wang; Pang-Ning Tan; Jiayu Zhou", "abstract": "Training deep neural network models in the presence of corrupted supervision is challenging as the corrupted data points may significantly impact generalization performance. To alleviate this problem, we present an efficient robust algorithm that achieves strong guarantees without any assumption on the type of corruption and provides a unified framework for both classification and regression problems. Unlike many existing approaches that quantify the quality of the data points (e.g., based on their individual loss values), and filter them accordingly, the proposed algorithm focuses on controlling the collective impact of data points on the average gradient. Even when a corrupted data point failed to be excluded by our algorithm, the data point will have a very limited impact on the overall loss, as compared with state-of-the-art filtering methods based on loss values. Extensive experiments on multiple benchmark datasets have demonstrated the robustness of our algorithm under different types of corruption. Our code is available at \\url{https://github.com/illidanlab/PRL}.", "bibtex": "@InProceedings{pmlr-v139-liu21v,\n title = \t {Learning Deep Neural Networks under Agnostic Corrupted Supervision},\n author = {Liu, Boyang and Sun, Mengying and Wang, Ding and Tan, Pang-Ning and Zhou, Jiayu},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6957--6967},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21v/liu21v.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21v.html},\n abstract = \t {Training deep neural network models in the presence of corrupted supervision is challenging as the corrupted data points may significantly impact generalization performance. 
To alleviate this problem, we present an efficient robust algorithm that achieves strong guarantees without any assumption on the type of corruption and provides a unified framework for both classification and regression problems. Unlike many existing approaches that quantify the quality of the data points (e.g., based on their individual loss values), and filter them accordingly, the proposed algorithm focuses on controlling the collective impact of data points on the average gradient. Even when a corrupted data point failed to be excluded by our algorithm, the data point will have a very limited impact on the overall loss, as compared with state-of-the-art filtering methods based on loss values. Extensive experiments on multiple benchmark datasets have demonstrated the robustness of our algorithm under different types of corruption. Our code is available at \\url{https://github.com/illidanlab/PRL}.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21v/liu21v.pdf", "supp": "", "pdf_size": 1151634, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3309135879887921559&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Department of Computer Science and Engineering, Michigan State University, USA; Department of Computer Science and Engineering, Michigan State University, USA; Department of Computer Science and Engineering, Michigan State University, USA; Department of Computer Science and Engineering, Michigan State University, USA; Department of Computer Science and Engineering, Michigan State University, USA", "aff_domain": "msu.edu; ; ; ;msu.edu", "email": "msu.edu; ; ; ;msu.edu", "github": "https://github.com/illidanlab/PRL", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/liu21v.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Michigan State University", "aff_unique_dep": "Department of Computer Science and Engineering", "aff_unique_url": "https://www.msu.edu", "aff_unique_abbr": "MSU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Learning Diverse-Structured Networks for Adversarial Robustness", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10509", "id": "10509", "proceeding": "http://proceedings.mlr.press/v139/du21f.html", "slides": "", "author_site": "Xuefeng Du, Jingfeng Zhang, Bo Han, Tongliang Liu, Yu Rong, Gang Niu, Junzhou Huang, Masashi Sugiyama", "author": "Xuefeng Du; Jingfeng Zhang; Bo Han; Tongliang Liu; Yu Rong; Gang Niu; Junzhou Huang; Masashi Sugiyama", "abstract": "In adversarial training (AT), the main focus has been the objective and optimizer while the model has been less studied, so that the models being used are still those classic ones in standard training (ST). Classic network architectures (NAs) are generally worse than searched NA in ST, which should be the same in AT. In this paper, we argue that NA and AT cannot be handled independently, since given a dataset, the optimal NA in ST would be no longer optimal in AT. That being said, AT is time-consuming itself; if we directly search NAs in AT over large search spaces, the computation will be practically infeasible. Thus, we propose diverse-structured network (DS-Net), to significantly reduce the size of the search space: instead of low-level operations, we only consider predefined atomic blocks, where an atomic block is a time-tested building block like the residual block. 
There are only a few atomic blocks and thus we can weight all atomic blocks rather than find the best one in a searched block of DS-Net, which is an essential tradeoff between exploring diverse structures and exploiting the best structures. Empirical results demonstrate the advantages of DS-Net, i.e., weighting the atomic blocks.", "bibtex": "@InProceedings{pmlr-v139-du21f,\n title = \t {Learning Diverse-Structured Networks for Adversarial Robustness},\n author = {Du, Xuefeng and Zhang, Jingfeng and Han, Bo and Liu, Tongliang and Rong, Yu and Niu, Gang and Huang, Junzhou and Sugiyama, Masashi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2880--2891},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/du21f/du21f.pdf},\n url = \t {https://proceedings.mlr.press/v139/du21f.html},\n abstract = \t {In adversarial training (AT), the main focus has been the objective and optimizer while the model has been less studied, so that the models being used are still those classic ones in standard training (ST). Classic network architectures (NAs) are generally worse than searched NA in ST, which should be the same in AT. In this paper, we argue that NA and AT cannot be handled independently, since given a dataset, the optimal NA in ST would be no longer optimal in AT. That being said, AT is time-consuming itself; if we directly search NAs in AT over large search spaces, the computation will be practically infeasible. Thus, we propose diverse-structured network (DS-Net), to significantly reduce the size of the search space: instead of low-level operations, we only consider predefined atomic blocks, where an atomic block is a time-tested building block like the residual block. There are only a few atomic blocks and thus we can weight all atomic blocks rather than find the best one in a searched block of DS-Net, which is an essential tradeoff between exploring diverse structures and exploiting the best structures. 
Empirical results demonstrate the advantages of DS-Net, i.e., weighting the atomic blocks.}\n}", "pdf": "http://proceedings.mlr.press/v139/du21f/du21f.pdf", "supp": "", "pdf_size": 1153562, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4158996356819287139&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Hong Kong Baptist University + University of Wisconsin-Madison; RIKEN; Hong Kong Baptist University; University of Sydney; Tencent AI Lab; RIKEN; Tencent AI Lab; University of Tokyo", "aff_domain": "hkbu.edu.hk;riken.jp;comp.hkbu.edu.hk;sydney.edu.au;tencent.com;riken.jp;tencent.com;k.u-tokyo.ac.jp", "email": "hkbu.edu.hk;riken.jp;comp.hkbu.edu.hk;sydney.edu.au;tencent.com;riken.jp;tencent.com;k.u-tokyo.ac.jp", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/du21f.html", "aff_unique_index": "0+1;2;0;3;4;2;4;5", "aff_unique_norm": "Hong Kong Baptist University;University of Wisconsin-Madison;RIKEN;University of Sydney;Tencent;University of Tokyo", "aff_unique_dep": ";;;;Tencent AI Lab;", "aff_unique_url": "https://www.hkbu.edu.hk;https://www.wisc.edu;https://www.riken.jp;https://www.sydney.edu.au;https://ai.tencent.com;https://www.u-tokyo.ac.jp", "aff_unique_abbr": "HKBU;UW-Madison;RIKEN;USYD;Tencent AI Lab;UTokyo", "aff_campus_unique_index": "0+1;0", "aff_campus_unique": "Hong Kong SAR;Madison;", "aff_country_unique_index": "0+1;2;0;3;0;2;0;2", "aff_country_unique": "China;United States;Japan;Australia" }, { "title": "Learning Fair Policies in Decentralized Cooperative Multi-Agent Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9253", "id": "9253", "proceeding": "http://proceedings.mlr.press/v139/zimmer21a.html", "slides": "/media/icml-2021/Slides/9253.pdf", "author_site": "Matthieu Zimmer, Claire Glanois, Umer Siddique, Paul Weng", "author": "Matthieu Zimmer; Claire Glanois; Umer Siddique; Paul Weng", "abstract": "We consider the problem of learning fair policies in (deep) cooperative multi-agent reinforcement learning (MARL). We formalize it in a principled way as the problem of optimizing a welfare function that explicitly encodes two important aspects of fairness: efficiency and equity. We provide a theoretical analysis of the convergence of policy gradient for this problem. As a solution method, we propose a novel neural network architecture, which is composed of two sub-networks specifically designed for taking into account these two aspects of fairness. In experiments, we demonstrate the importance of the two sub-networks for fair optimization. Our overall approach is general as it can accommodate any (sub)differentiable welfare function. Therefore, it is compatible with various notions of fairness that have been proposed in the literature (e.g., lexicographic maximin, generalized Gini social welfare function, proportional fairness). Our method is generic and can be implemented in various MARL settings: centralized training and decentralized execution, or fully decentralized. 
Finally, we experimentally validate our approach in various domains and show that it can perform much better than previous methods, both in terms of efficiency and equity.", "bibtex": "@InProceedings{pmlr-v139-zimmer21a,\n title = \t {Learning Fair Policies in Decentralized Cooperative Multi-Agent Reinforcement Learning},\n author = {Zimmer, Matthieu and Glanois, Claire and Siddique, Umer and Weng, Paul},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12967--12978},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zimmer21a/zimmer21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zimmer21a.html},\n abstract = \t {We consider the problem of learning fair policies in (deep) cooperative multi-agent reinforcement learning (MARL). We formalize it in a principled way as the problem of optimizing a welfare function that explicitly encodes two important aspects of fairness: efficiency and equity. We provide a theoretical analysis of the convergence of policy gradient for this problem. As a solution method, we propose a novel neural network architecture, which is composed of two sub-networks specifically designed for taking into account these two aspects of fairness. In experiments, we demonstrate the importance of the two sub-networks for fair optimization. Our overall approach is general as it can accommodate any (sub)differentiable welfare function. Therefore, it is compatible with various notions of fairness that have been proposed in the literature (e.g., lexicographic maximin, generalized Gini social welfare function, proportional fairness). Our method is generic and can be implemented in various MARL settings: centralized training and decentralized execution, or fully decentralized. 
Finally, we experimentally validate our approach in various domains and show that it can perform much better than previous methods, both in terms of efficiency and equity.}\n}", "pdf": "http://proceedings.mlr.press/v139/zimmer21a/zimmer21a.pdf", "supp": "", "pdf_size": 5325611, "gs_citation": 76, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4138196305304248608&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "UM-SJTU Joint Institute, Shanghai Jiao Tong University, China; UM-SJTU Joint Institute, Shanghai Jiao Tong University, China; UM-SJTU Joint Institute, Shanghai Jiao Tong University, China; Department of Automation, Shanghai Jiao Tong University, Shanghai, China", "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/zimmer21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Shanghai Jiao Tong University", "aff_unique_dep": "UM-SJTU Joint Institute", "aff_unique_url": "https://en.sjtu.edu.cn", "aff_unique_abbr": "SJTU", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Shanghai", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "China" }, { "title": "Learning Generalized Intersection Over Union for Dense Pixelwise Prediction", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9661", "id": "9661", "proceeding": "http://proceedings.mlr.press/v139/yu21e.html", "slides": "/media/icml-2021/Slides/9661.pdf", "author_site": "Jiaqian Yu, Jingtao Xu, Yiwei Chen, Weiming Li, Qiang Wang, ByungIn Yoo, Jae-Joon Han", "author": "Jiaqian Yu; Jingtao Xu; Yiwei Chen; Weiming Li; Qiang Wang; Byungin Yoo; Jae-Joon Han", "abstract": "Intersection over union (IoU) score, also named Jaccard Index, is one of the most fundamental evaluation methods in machine learning. The original IoU computation cannot provide non-zero gradients and thus cannot be directly optimized by nowadays deep learning methods. Several recent works generalized IoU for bounding box regression, but they are not straightforward to adapt for pixelwise prediction. In particular, the original IoU fails to provide effective gradients for the non-overlapping and location-deviation cases, which results in performance plateau. In this paper, we propose PixIoU, a generalized IoU for pixelwise prediction that is sensitive to the distance for non-overlapping cases and the locations in prediction. We provide proofs that PixIoU holds many nice properties as the original IoU. To optimize the PixIoU, we also propose a loss function that is proved to be submodular, hence we can apply the Lov\u00e1sz functions, the efficient surrogates for submodular functions for learning this loss. 
Experimental results show consistent performance improvements by learning PixIoU over the original IoU for several different pixelwise prediction tasks on Pascal VOC, VOT-2020 and Cityscapes.", "bibtex": "@InProceedings{pmlr-v139-yu21e,\n title = \t {Learning Generalized Intersection Over Union for Dense Pixelwise Prediction},\n author = {Yu, Jiaqian and Xu, Jingtao and Chen, Yiwei and Li, Weiming and Wang, Qiang and Yoo, Byungin and Han, Jae-Joon},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12198--12207},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yu21e/yu21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/yu21e.html},\n abstract = \t {Intersection over union (IoU) score, also named Jaccard Index, is one of the most fundamental evaluation methods in machine learning. The original IoU computation cannot provide non-zero gradients and thus cannot be directly optimized by nowadays deep learning methods. Several recent works generalized IoU for bounding box regression, but they are not straightforward to adapt for pixelwise prediction. In particular, the original IoU fails to provide effective gradients for the non-overlapping and location-deviation cases, which results in performance plateau. In this paper, we propose PixIoU, a generalized IoU for pixelwise prediction that is sensitive to the distance for non-overlapping cases and the locations in prediction. We provide proofs that PixIoU holds many nice properties as the original IoU. To optimize the PixIoU, we also propose a loss function that is proved to be submodular, hence we can apply the Lov\u00e1sz functions, the efficient surrogates for submodular functions for learning this loss. 
Experimental results show consistent performance improvements by learning PixIoU over the original IoU for several different pixelwise prediction tasks on Pascal VOC, VOT-2020 and Cityscapes.}\n}", "pdf": "http://proceedings.mlr.press/v139/yu21e/yu21e.pdf", "supp": "", "pdf_size": 800949, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9142643575313658323&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Samsung Research China - Beijing, Beijing, China; Samsung Research China - Beijing, Beijing, China; Samsung Research China - Beijing, Beijing, China; Samsung Research China - Beijing, Beijing, China; Samsung Research China - Beijing, Beijing, China; Samsung Advanced Institute of Technology, Suwon, South Korea; Samsung Advanced Institute of Technology, Suwon, South Korea", "aff_domain": "samsung.com; ; ; ; ; ; ", "email": "samsung.com; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/yu21e.html", "aff_unique_index": "0;0;0;0;0;0;0", "aff_unique_norm": "Samsung", "aff_unique_dep": "Samsung Research China", "aff_unique_url": "https://www.samsung.com/cn/research/", "aff_unique_abbr": "SRC", "aff_campus_unique_index": "0;0;0;0;0;1;1", "aff_campus_unique": "Beijing;Suwon", "aff_country_unique_index": "0;0;0;0;0;1;1", "aff_country_unique": "China;South Korea" }, { "title": "Learning Gradient Fields for Molecular Conformation Generation", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8519", "id": "8519", "proceeding": "http://proceedings.mlr.press/v139/shi21b.html", "slides": "/media/icml-2021/Slides/8519.pdf", "author_site": "Chence Shi, Shitong Luo, Minkai Xu, Jian Tang", "author": "Chence Shi; Shitong Luo; Minkai Xu; Jian Tang", "abstract": "We study a fundamental problem in computational chemistry known as molecular conformation generation, trying to predict stable 3D structures from 2D molecular graphs. Existing machine learning approaches usually first predict distances between atoms and then generate a 3D structure satisfying the distances, where noise in predicted distances may induce extra errors during 3D coordinate generation. Inspired by the traditional force field methods for molecular dynamics simulation, in this paper, we propose a novel approach called ConfGF by directly estimating the gradient fields of the log density of atomic coordinates. The estimated gradient fields allow directly generating stable conformations via Langevin dynamics. However, the problem is very challenging as the gradient fields are roto-translation equivariant. We notice that estimating the gradient fields of atomic coordinates can be translated to estimating the gradient fields of interatomic distances, and hence develop a novel algorithm based on recent score-based generative models to effectively estimate these gradients. 
Experimental results across multiple tasks show that ConfGF outperforms previous state-of-the-art baselines by a significant margin.", "bibtex": "@InProceedings{pmlr-v139-shi21b,\n title = \t {Learning Gradient Fields for Molecular Conformation Generation},\n author = {Shi, Chence and Luo, Shitong and Xu, Minkai and Tang, Jian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9558--9568},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/shi21b/shi21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/shi21b.html},\n abstract = \t {We study a fundamental problem in computational chemistry known as molecular conformation generation, trying to predict stable 3D structures from 2D molecular graphs. Existing machine learning approaches usually first predict distances between atoms and then generate a 3D structure satisfying the distances, where noise in predicted distances may induce extra errors during 3D coordinate generation. Inspired by the traditional force field methods for molecular dynamics simulation, in this paper, we propose a novel approach called ConfGF by directly estimating the gradient fields of the log density of atomic coordinates. The estimated gradient fields allow directly generating stable conformations via Langevin dynamics. However, the problem is very challenging as the gradient fields are roto-translation equivariant. We notice that estimating the gradient fields of atomic coordinates can be translated to estimating the gradient fields of interatomic distances, and hence develop a novel algorithm based on recent score-based generative models to effectively estimate these gradients. Experimental results across multiple tasks show that ConfGF outperforms previous state-of-the-art baselines by a significant margin.}\n}", "pdf": "http://proceedings.mlr.press/v139/shi21b/shi21b.pdf", "supp": "", "pdf_size": 3955706, "gs_citation": 258, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1418815604364379894&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "https://github.com/DeepGraphLearning/ConfGF", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/shi21b.html" }, { "title": "Learning Interaction Kernels for Agent Systems on Riemannian Manifolds", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10167", "id": "10167", "proceeding": "http://proceedings.mlr.press/v139/maggioni21a.html", "slides": "", "author_site": "Mauro Maggioni, Jason Miller, Hongda Qiu, Ming Zhong", "author": "Mauro Maggioni; Jason J Miller; Hongda Qiu; Ming Zhong", "abstract": "Interacting agent and particle systems are extensively used to model complex phenomena in science and engineering. We consider the problem of learning interaction kernels in these dynamical systems constrained to evolve on Riemannian manifolds from given trajectory data. The models we consider are based on interaction kernels depending on pairwise Riemannian distances between agents, with agents interacting locally along the direction of the shortest geodesic connecting them. 
We show that our estimators converge at a rate that is independent of the dimension of the state space, and derive bounds on the trajectory estimation error, on the manifold, between the observed and estimated dynamics. We demonstrate the performance of our estimator on two classical first order interacting systems: Opinion Dynamics and a Predator-Swarm system, with each system constrained on two prototypical manifolds, the $2$-dimensional sphere and the Poincar\u00e9 disk model of hyperbolic space.", "bibtex": "@InProceedings{pmlr-v139-maggioni21a,\n title = \t {Learning Interaction Kernels for Agent Systems on Riemannian Manifolds},\n author = {Maggioni, Mauro and Miller, Jason J and Qiu, Hongda and Zhong, Ming},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7290--7300},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/maggioni21a/maggioni21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/maggioni21a.html},\n abstract = \t {Interacting agent and particle systems are extensively used to model complex phenomena in science and engineering. We consider the problem of learning interaction kernels in these dynamical systems constrained to evolve on Riemannian manifolds from given trajectory data. The models we consider are based on interaction kernels depending on pairwise Riemannian distances between agents, with agents interacting locally along the direction of the shortest geodesic connecting them. We show that our estimators converge at a rate that is independent of the dimension of the state space, and derive bounds on the trajectory estimation error, on the manifold, between the observed and estimated dynamics. 
We demonstrate the performance of our estimator on two classical first order interacting systems: Opinion Dynamics and a Predator-Swarm system, with each system constrained on two prototypical manifolds, the $2$-dimensional sphere and the Poincar\u00e9 disk model of hyperbolic space.}\n}", "pdf": "http://proceedings.mlr.press/v139/maggioni21a/maggioni21a.pdf", "supp": "", "pdf_size": 7398159, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5355151320052043623&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Applied Mathematics & Statistics, Johns Hopkins University + Department of Mathematics, Department of Applied Mathematics & Statistics, Mathematical Institute for Data Science, Johns Hopkins University; Department of Applied Mathematics & Statistics, Johns Hopkins University; Department of Applied Mathematics & Statistics, Johns Hopkins University; Department of Applied Mathematics & Statistics, Johns Hopkins University", "aff_domain": "jhu.edu; ; ;jhu.edu", "email": "jhu.edu; ; ;jhu.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/maggioni21a.html", "aff_unique_index": "0+0;0;0;0", "aff_unique_norm": "Johns Hopkins University", "aff_unique_dep": "Department of Applied Mathematics & Statistics", "aff_unique_url": "https://www.jhu.edu", "aff_unique_abbr": "JHU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0;0", "aff_country_unique": "United States" }, { "title": "Learning Intra-Batch Connections for Deep Metric Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10237", "id": "10237", "proceeding": "http://proceedings.mlr.press/v139/seidenschwarz21a.html", "slides": "/media/icml-2021/Slides/10237.pdf", "author_site": "Jenny Seidenschwarz, Ismail Elezi, Laura Leal-Taix\u00e9", "author": "Jenny Denise Seidenschwarz; Ismail Elezi; Laura Leal-Taix\u00e9", "abstract": "The goal of metric learning is to learn a function that maps samples to a lower-dimensional space where similar samples lie closer than dissimilar ones. Particularly, deep metric learning utilizes neural networks to learn such a mapping. Most approaches rely on losses that only take the relations between pairs or triplets of samples into account, which either belong to the same class or two different classes. However, these methods do not explore the embedding space in its entirety. To this end, we propose an approach based on message passing networks that takes all the relations in a mini-batch into account. We refine embedding vectors by exchanging messages among all samples in a given batch allowing the training process to be aware of its overall structure. Since not all samples are equally important to predict a decision boundary, we use an attention mechanism during message passing to allow samples to weigh the importance of each neighbor accordingly. We achieve state-of-the-art results on clustering and image retrieval on the CUB-200-2011, Cars196, Stanford Online Products, and In-Shop Clothes datasets. 
To facilitate further research, we make available the code and the models at https://github.com/dvl-tum/intra_batch_connections.", "bibtex": "@InProceedings{pmlr-v139-seidenschwarz21a,\n title = \t {Learning Intra-Batch Connections for Deep Metric Learning},\n author = {Seidenschwarz, Jenny Denise and Elezi, Ismail and Leal-Taix{\\'e}, Laura},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9410--9421},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/seidenschwarz21a/seidenschwarz21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/seidenschwarz21a.html},\n abstract = \t {The goal of metric learning is to learn a function that maps samples to a lower-dimensional space where similar samples lie closer than dissimilar ones. Particularly, deep metric learning utilizes neural networks to learn such a mapping. Most approaches rely on losses that only take the relations between pairs or triplets of samples into account, which either belong to the same class or two different classes. However, these methods do not explore the embedding space in its entirety. To this end, we propose an approach based on message passing networks that takes all the relations in a mini-batch into account. We refine embedding vectors by exchanging messages among all samples in a given batch allowing the training process to be aware of its overall structure. Since not all samples are equally important to predict a decision boundary, we use an attention mechanism during message passing to allow samples to weigh the importance of each neighbor accordingly. We achieve state-of-the-art results on clustering and image retrieval on the CUB-200-2011, Cars196, Stanford Online Products, and In-Shop Clothes datasets. 
To facilitate further research, we make available the code and the models at https://github.com/dvl-tum/intra_batch_connections.}\n}", "pdf": "http://proceedings.mlr.press/v139/seidenschwarz21a/seidenschwarz21a.pdf", "supp": "", "pdf_size": 3365775, "gs_citation": 77, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10851391941882516865&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science, Technical University of Munich, Munich, Germany; Department of Computer Science, Technical University of Munich, Munich, Germany; Department of Computer Science, Technical University of Munich, Munich, Germany", "aff_domain": "tum.de; ; ", "email": "tum.de; ; ", "github": "https://github.com/dvl-tum/intra_batch", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/seidenschwarz21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Technical University of Munich", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.tum.de", "aff_unique_abbr": "TUM", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Munich", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Germany" }, { "title": "Learning Neural Network Subspaces", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9769", "id": "9769", "proceeding": "http://proceedings.mlr.press/v139/wortsman21a.html", "slides": "", "author_site": "Mitchell Wortsman, Maxwell Horton, Carlos Guestrin, Ali Farhadi, Mohammad Rastegari", "author": "Mitchell Wortsman; Maxwell C Horton; Carlos Guestrin; Ali Farhadi; Mohammad Rastegari", "abstract": "Recent observations have advanced our understanding of the neural network optimization landscape, revealing the existence of (1) paths of high accuracy containing diverse solutions and (2) wider minima offering improved performance. Previous methods observing diverse paths require multiple training runs. In contrast we aim to leverage both property (1) and (2) with a single method and in a single training run. With a similar computational cost as training one model, we learn lines, curves, and simplexes of high-accuracy neural networks. These neural network subspaces contain diverse solutions that can be ensembled, approaching the ensemble performance of independently trained networks without the training cost. Moreover, using the subspace midpoint boosts accuracy, calibration, and robustness to label noise, outperforming Stochastic Weight Averaging.", "bibtex": "@InProceedings{pmlr-v139-wortsman21a,\n title = \t {Learning Neural Network Subspaces},\n author = {Wortsman, Mitchell and Horton, Maxwell C and Guestrin, Carlos and Farhadi, Ali and Rastegari, Mohammad},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11217--11227},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wortsman21a/wortsman21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/wortsman21a.html},\n abstract = \t {Recent observations have advanced our understanding of the neural network optimization landscape, revealing the existence of (1) paths of high accuracy containing diverse solutions and (2) wider minima offering improved performance. Previous methods observing diverse paths require multiple training runs. 
In contrast we aim to leverage both property (1) and (2) with a single method and in a single training run. With a similar computational cost as training one model, we learn lines, curves, and simplexes of high-accuracy neural networks. These neural network subspaces contain diverse solutions that can be ensembled, approaching the ensemble performance of independently trained networks without the training cost. Moreover, using the subspace midpoint boosts accuracy, calibration, and robustness to label noise, outperforming Stochastic Weight Averaging.}\n}", "pdf": "http://proceedings.mlr.press/v139/wortsman21a/wortsman21a.pdf", "supp": "", "pdf_size": 920153, "gs_citation": 103, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10251875714480398754&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "University of Washington (work completed during internship at Apple); Apple; Apple; Apple; Apple", "aff_domain": "cs.washington.edu; ; ; ; ", "email": "cs.washington.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/wortsman21a.html", "aff_unique_index": "0;1;1;1;1", "aff_unique_norm": "University of Washington;Apple", "aff_unique_dep": ";Apple Inc.", "aff_unique_url": "https://www.washington.edu;https://www.apple.com", "aff_unique_abbr": "UW;Apple", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Learning Node Representations Using Stationary Flow Prediction on Large Payment and Cash Transaction Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9941", "id": "9941", "proceeding": "http://proceedings.mlr.press/v139/ceylan21a.html", "slides": "/media/icml-2021/Slides/9941.pdf", "author_site": "Ciwan Ceylan, Salla Franz\u00e9n, Florian T. Pokorny", "author": "Ciwan Ceylan; Salla Franz\u00e9n; Florian T. Pokorny", "abstract": "Banks are required to analyse large transaction datasets as a part of the fight against financial crime. Today, this analysis is either performed manually by domain experts or using expensive feature engineering. Gradient flow analysis allows for basic representation learning as node potentials can be inferred directly from network transaction data. However, the gradient model has a fundamental limitation: it cannot represent all types of of network flows. Furthermore, standard methods for learning the gradient flow are not appropriate for flow signals that span multiple orders of magnitude and contain outliers, i.e. transaction data. In this work, the gradient model is extended to a gated version and we prove that it, unlike the gradient model, is a universal approximator for flows on graphs. To tackle the mentioned challenges of transaction data, we propose a multi-scale and outlier robust loss function based on the Student-t log-likelihood. Ethereum transaction data is used for evaluation and the gradient models outperform MLP models using hand-engineered and node2vec features in terms of relative error. 
These results extend to 60 synthetic datasets, with experiments also showing that the gated gradient model learns qualitative information about the underlying synthetic generative flow distributions.", "bibtex": "@InProceedings{pmlr-v139-ceylan21a,\n title = \t {Learning Node Representations Using Stationary Flow Prediction on Large Payment and Cash Transaction Networks},\n author = {Ceylan, Ciwan and Franz{\\'e}n, Salla and Pokorny, Florian T.},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1395--1406},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ceylan21a/ceylan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ceylan21a.html},\n abstract = \t {Banks are required to analyse large transaction datasets as a part of the fight against financial crime. Today, this analysis is either performed manually by domain experts or using expensive feature engineering. Gradient flow analysis allows for basic representation learning as node potentials can be inferred directly from network transaction data. However, the gradient model has a fundamental limitation: it cannot represent all types of of network flows. Furthermore, standard methods for learning the gradient flow are not appropriate for flow signals that span multiple orders of magnitude and contain outliers, i.e. transaction data. In this work, the gradient model is extended to a gated version and we prove that it, unlike the gradient model, is a universal approximator for flows on graphs. To tackle the mentioned challenges of transaction data, we propose a multi-scale and outlier robust loss function based on the Student-t log-likelihood. Ethereum transaction data is used for evaluation and the gradient models outperform MLP models using hand-engineered and node2vec features in terms of relative error. 
These results extend to 60 synthetic datasets, with experiments also showing that the gated gradient model learns qualitative information about the underlying synthetic generative flow distributions.}\n}", "pdf": "http://proceedings.mlr.press/v139/ceylan21a/ceylan21a.pdf", "supp": "", "pdf_size": 1579535, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15458238969889388248&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "RPL, EECS, KTH Royal Institute of Technology, Stockholm, Sweden + SEB Group, Stockholm, Sweden; SEB Group, Stockholm, Sweden; RPL, EECS, KTH Royal Institute of Technology, Stockholm, Sweden", "aff_domain": "kth.se; ; ", "email": "kth.se; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/ceylan21a.html", "aff_unique_index": "0+1;1;0", "aff_unique_norm": "KTH Royal Institute of Technology;SEB Group", "aff_unique_dep": "EECS;", "aff_unique_url": "https://www.kth.se;https://www.sebgroup.com", "aff_unique_abbr": "KTH;SEB", "aff_campus_unique_index": "0+0;0;0", "aff_campus_unique": "Stockholm", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "Sweden" }, { "title": "Learning Noise Transition Matrix from Only Noisy Labels via Total Variation Regularization", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10283", "id": "10283", "proceeding": "http://proceedings.mlr.press/v139/zhang21n.html", "slides": "/media/icml-2021/Slides/10283.pdf", "author_site": "Yivan Zhang, Gang Niu, Masashi Sugiyama", "author": "Yivan Zhang; Gang Niu; Masashi Sugiyama", "abstract": "Many weakly supervised classification methods employ a noise transition matrix to capture the class-conditional label corruption. To estimate the transition matrix from noisy data, existing methods often need to estimate the noisy class-posterior, which could be unreliable due to the overconfidence of neural networks. In this work, we propose a theoretically grounded method that can estimate the noise transition matrix and learn a classifier simultaneously, without relying on the error-prone noisy class-posterior estimation. Concretely, inspired by the characteristics of the stochastic label corruption process, we propose total variation regularization, which encourages the predicted probabilities to be more distinguishable from each other. Under mild assumptions, the proposed method yields a consistent estimator of the transition matrix. We show the effectiveness of the proposed method through experiments on benchmark and real-world datasets.", "bibtex": "@InProceedings{pmlr-v139-zhang21n,\n title = \t {Learning Noise Transition Matrix from Only Noisy Labels via Total Variation Regularization},\n author = {Zhang, Yivan and Niu, Gang and Sugiyama, Masashi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12501--12512},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21n/zhang21n.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21n.html},\n abstract = \t {Many weakly supervised classification methods employ a noise transition matrix to capture the class-conditional label corruption. 
To estimate the transition matrix from noisy data, existing methods often need to estimate the noisy class-posterior, which could be unreliable due to the overconfidence of neural networks. In this work, we propose a theoretically grounded method that can estimate the noise transition matrix and learn a classifier simultaneously, without relying on the error-prone noisy class-posterior estimation. Concretely, inspired by the characteristics of the stochastic label corruption process, we propose total variation regularization, which encourages the predicted probabilities to be more distinguishable from each other. Under mild assumptions, the proposed method yields a consistent estimator of the transition matrix. We show the effectiveness of the proposed method through experiments on benchmark and real-world datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21n/zhang21n.pdf", "supp": "", "pdf_size": 1437662, "gs_citation": 115, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14671082055157503187&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "The University of Tokyo, Japan+RIKEN AIP, Japan; RIKEN AIP, Japan; RIKEN AIP, Japan", "aff_domain": "ms.k.u-tokyo.ac.jp; ; ", "email": "ms.k.u-tokyo.ac.jp; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zhang21n.html", "aff_unique_index": "0+1;1;1", "aff_unique_norm": "University of Tokyo;RIKEN AIP", "aff_unique_dep": ";", "aff_unique_url": "https://www.u-tokyo.ac.jp;https://aip.Riken.jp", "aff_unique_abbr": "UTokyo;RIKEN AIP", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "Japan" }, { "title": "Learning Online Algorithms with Distributional Advice", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10065", "id": "10065", "proceeding": "http://proceedings.mlr.press/v139/diakonikolas21a.html", "slides": "/media/icml-2021/Slides/10065.pdf", "author_site": "Ilias Diakonikolas, Vasilis Kontonis, Christos Tzamos, Ali Vakilian, Nikos Zarifis", "author": "Ilias Diakonikolas; Vasilis Kontonis; Christos Tzamos; Ali Vakilian; Nikos Zarifis", "abstract": "We study the problem of designing online algorithms given advice about the input. While prior work had focused on deterministic advice, we only assume distributional access to the instances of interest, and the goal is to learn a competitive algorithm given access to i.i.d. samples. We aim to be competitive against an adversary with prior knowledge of the distribution, while also performing well against worst-case inputs. We focus on the classical online problems of ski-rental and prophet-inequalities, and provide sample complexity bounds for the underlying learning tasks. First, we point out that for general distributions it is information-theoretically impossible to beat the worst-case competitive-ratio with any finite sample size. As our main contribution, we establish strong positive results for well-behaved distributions. Specifically, for the broad class of log-concave distributions, we show that $\\mathrm{poly}(1/\\epsilon)$ samples suffice to obtain $(1+\\epsilon)$-competitive ratio. 
Finally, we show that this sample upper bound is close to best possible, even for very simple classes of distributions.", "bibtex": "@InProceedings{pmlr-v139-diakonikolas21a,\n title = \t {Learning Online Algorithms with Distributional Advice},\n author = {Diakonikolas, Ilias and Kontonis, Vasilis and Tzamos, Christos and Vakilian, Ali and Zarifis, Nikos},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2687--2696},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/diakonikolas21a/diakonikolas21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/diakonikolas21a.html},\n abstract = \t {We study the problem of designing online algorithms given advice about the input. While prior work had focused on deterministic advice, we only assume distributional access to the instances of interest, and the goal is to learn a competitive algorithm given access to i.i.d. samples. We aim to be competitive against an adversary with prior knowledge of the distribution, while also performing well against worst-case inputs. We focus on the classical online problems of ski-rental and prophet-inequalities, and provide sample complexity bounds for the underlying learning tasks. First, we point out that for general distributions it is information-theoretically impossible to beat the worst-case competitive-ratio with any finite sample size. As our main contribution, we establish strong positive results for well-behaved distributions. Specifically, for the broad class of log-concave distributions, we show that $\\mathrm{poly}(1/\\epsilon)$ samples suffice to obtain $(1+\\epsilon)$-competitive ratio. 
Finally, we show that this sample upper bound is close to best possible, even for very simple classes of distributions.}\n}", "pdf": "http://proceedings.mlr.press/v139/diakonikolas21a/diakonikolas21a.pdf", "supp": "", "pdf_size": 328657, "gs_citation": 37, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13219329994819426681&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "Department of Computer Sciences, University of Wisconsin, Madison, Wisconsin, USA; Department of Computer Sciences, University of Wisconsin, Madison, Wisconsin, USA; Department of Computer Sciences, University of Wisconsin, Madison, Wisconsin, USA; Toyota Technological Institute at Chicago (TTIC), Chicago, Illinois, USA + Department of Computer Sciences, University of Wisconsin, Madison, Wisconsin, USA; Department of Computer Sciences, University of Wisconsin, Madison, Wisconsin, USA", "aff_domain": "wisc.edu;wisc.edu;wisc.edu;ttic.edu;wisc.edu", "email": "wisc.edu;wisc.edu;wisc.edu;ttic.edu;wisc.edu", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/diakonikolas21a.html", "aff_unique_index": "0;0;0;1+0;0", "aff_unique_norm": "University of Wisconsin-Madison;Toyota Technological Institute at Chicago", "aff_unique_dep": "Department of Computer Sciences;", "aff_unique_url": "https://www.wisc.edu;https://www.ttic.edu", "aff_unique_abbr": "UW-Madison;TTIC", "aff_campus_unique_index": "0;0;0;1+0;0", "aff_campus_unique": "Madison;Chicago", "aff_country_unique_index": "0;0;0;0+0;0", "aff_country_unique": "United States" }, { "title": "Learning Optimal Auctions with Correlated Valuations from Samples", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8435", "id": "8435", "proceeding": "http://proceedings.mlr.press/v139/yang21b.html", "slides": "/media/icml-2021/Slides/8435.pdf", "author_site": "CHUNXUE YANG, Xiaohui Bei", "author": "Chunxue Yang; Xiaohui Bei", "abstract": "In single-item auction design, it is well known due to Cremer and McLean that when bidders\u2019 valuations are drawn from a correlated prior distribution, the auctioneer can extract full social surplus as revenue. However, in most real-world applications, the prior is usually unknown and can only be learned from historical data. In this work, we investigate the robustness of the optimal auction with correlated valuations via sample complexity analysis. We prove upper and lower bounds on the number of samples from the unknown prior required to learn a (1-epsilon)-approximately optimal auction. 
Our results reinforce the common belief that optimal correlated auctions are sensitive to the distribution parameters and hard to learn unless the prior distribution is well-behaved.", "bibtex": "@InProceedings{pmlr-v139-yang21b,\n title = \t {Learning Optimal Auctions with Correlated Valuations from Samples},\n author = {Yang, Chunxue and Bei, Xiaohui},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11716--11726},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yang21b/yang21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/yang21b.html},\n abstract = \t {In single-item auction design, it is well known due to Cremer and McLean that when bidders\u2019 valuations are drawn from a correlated prior distribution, the auctioneer can extract full social surplus as revenue. However, in most real-world applications, the prior is usually unknown and can only be learned from historical data. In this work, we investigate the robustness of the optimal auction with correlated valuations via sample complexity analysis. We prove upper and lower bounds on the number of samples from the unknown prior required to learn a (1-epsilon)-approximately optimal auction. Our results reinforce the common belief that optimal correlated auctions are sensitive to the distribution parameters and hard to learn unless the prior distribution is well-behaved.}\n}", "pdf": "http://proceedings.mlr.press/v139/yang21b/yang21b.pdf", "supp": "", "pdf_size": 345946, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7920017400157767471&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "School of Physical and Mathematical Sciences, Nanyang Technological University, Singapore; School of Physical and Mathematical Sciences, Nanyang Technological University, Singapore", "aff_domain": "e.ntu.edu.sg;ntu.edu.sg", "email": "e.ntu.edu.sg;ntu.edu.sg", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/yang21b.html", "aff_unique_index": "0;0", "aff_unique_norm": "Nanyang Technological University", "aff_unique_dep": "School of Physical and Mathematical Sciences", "aff_unique_url": "https://www.ntu.edu.sg", "aff_unique_abbr": "NTU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Singapore", "aff_country_unique_index": "0;0", "aff_country_unique": "Singapore" }, { "title": "Learning Queueing Policies for Organ Transplantation Allocation using Interpretable Counterfactual Survival Analysis", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9513", "id": "9513", "proceeding": "http://proceedings.mlr.press/v139/berrevoets21a.html", "slides": "", "author_site": "Jeroen Berrevoets, Ahmed Alaa, Zhaozhi Qian, James Jordon, alexander gimson, Mihaela van der Schaar", "author": "Jeroen Berrevoets; Ahmed Alaa; Zhaozhi Qian; James Jordon; Alexander E. S. Gimson; Mihaela van der Schaar", "abstract": "Organ transplantation is often the last resort for treating end-stage illnesses, but managing transplant wait-lists is challenging because of organ scarcity and the complexity of assessing donor-recipient compatibility. In this paper, we develop a data-driven model for (real-time) organ allocation using observational data for transplant outcomes. 
Our model integrates a queuing-theoretic framework with unsupervised learning to cluster the organs into \u201corgan types\u201d, and then construct priority queues (associated with each organ type) wherein incoming patients are assigned. To reason about organ allocations, the model uses synthetic controls to infer a patient\u2019s survival outcomes under counterfactual allocations to the different organ types \u2013 the model is trained end-to-end to optimise the trade-off between patient waiting time and expected survival time. The usage of synthetic controls enables patient-level interpretations of allocation decisions that can be presented and understood by clinicians. We test our model on multiple data sets, and show that it outperforms other organ-allocation policies in terms of added life-years and death count. Furthermore, we introduce a novel organ-allocation simulator to accurately test new policies.", "bibtex": "@InProceedings{pmlr-v139-berrevoets21a,\n title = \t {Learning Queueing Policies for Organ Transplantation Allocation using Interpretable Counterfactual Survival Analysis},\n author = {Berrevoets, Jeroen and Alaa, Ahmed and Qian, Zhaozhi and Jordon, James and Gimson, Alexander E. S. and van der Schaar, Mihaela},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {792--802},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/berrevoets21a/berrevoets21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/berrevoets21a.html},\n abstract = \t {Organ transplantation is often the last resort for treating end-stage illnesses, but managing transplant wait-lists is challenging because of organ scarcity and the complexity of assessing donor-recipient compatibility. In this paper, we develop a data-driven model for (real-time) organ allocation using observational data for transplant outcomes. Our model integrates a queuing-theoretic framework with unsupervised learning to cluster the organs into \u201corgan types\u201d, and then construct priority queues (associated with each organ type) wherein incoming patients are assigned. To reason about organ allocations, the model uses synthetic controls to infer a patient\u2019s survival outcomes under counterfactual allocations to the different organ types \u2013 the model is trained end-to-end to optimise the trade-off between patient waiting time and expected survival time. The usage of synthetic controls enables patient-level interpretations of allocation decisions that can be presented and understood by clinicians. We test our model on multiple data sets, and show that it outperforms other organ-allocation policies in terms of added life-years and death count. 
Furthermore, we introduce a novel organ-allocation simulator to accurately test new policies.}\n}", "pdf": "http://proceedings.mlr.press/v139/berrevoets21a/berrevoets21a.pdf", "supp": "", "pdf_size": 2101489, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6354574471522688027&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Applied Mathematics and Theoretical Physics, University of Cambridge, Cambridge, UK+The Alan Turing Institute, London, UK; Department of Electrical Engineering, University of California, Los Angeles (UCLA), Los Angeles CA, USA+The Alan Turing Institute, London, UK; Department of Applied Mathematics and Theoretical Physics, University of Cambridge, Cambridge, UK; Department of Engineering Science, University of Oxford, Oxford, UK; Cambridge University Hospitals, Cambridge, UK; Department of Electrical Engineering, University of California, Los Angeles (UCLA), Los Angeles CA, USA+The Alan Turing Institute, London, UK", "aff_domain": "maths.cam.ac.uk; ; ; ; ; ", "email": "maths.cam.ac.uk; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/berrevoets21a.html", "aff_unique_index": "0+1;2+1;0;3;4;2+1", "aff_unique_norm": "University of Cambridge;Alan Turing Institute;University of California, Los Angeles;University of Oxford;Cambridge University Hospitals", "aff_unique_dep": "Department of Applied Mathematics and Theoretical Physics;;Department of Electrical Engineering;Department of Engineering Science;", "aff_unique_url": "https://www.cam.ac.uk;https://www.turing.ac.uk;https://www.ucla.edu;https://www.ox.ac.uk;https://www.cuh.org.uk", "aff_unique_abbr": "Cambridge;ATI;UCLA;Oxford;", "aff_campus_unique_index": "0+1;2+1;0;3;0;2+1", "aff_campus_unique": "Cambridge;London;Los Angeles;Oxford", "aff_country_unique_index": "0+0;1+0;0;0;0;1+0", "aff_country_unique": "United Kingdom;United States" }, { "title": "Learning Randomly Perturbed Structured Predictors for Direct Loss Minimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9319", "id": "9319", "proceeding": "http://proceedings.mlr.press/v139/indelman21a.html", "slides": "/media/icml-2021/Slides/9319.pdf", "author_site": "Hedda Cohen Indelman, Tamir Hazan", "author": "Hedda Cohen Indelman; Tamir Hazan", "abstract": "Direct loss minimization is a popular approach for learning predictors over structured label spaces. This approach is computationally appealing as it replaces integration with optimization and allows gradients to be propagated in a deep net using loss-perturbed prediction. Recently, this technique was extended to generative models by introducing a randomized predictor that samples a structure from a randomly perturbed score function. In this work, we interpolate between these techniques by learning the variance of randomized structured predictors as well as their mean, in order to balance between the learned score function and the randomized noise. 
We demonstrate empirically the effectiveness of learning this balance in structured discrete spaces.", "bibtex": "@InProceedings{pmlr-v139-indelman21a,\n title = \t {Learning Randomly Perturbed Structured Predictors for Direct Loss Minimization},\n author = {Indelman, Hedda Cohen and Hazan, Tamir},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4585--4595},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/indelman21a/indelman21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/indelman21a.html},\n abstract = \t {Direct loss minimization is a popular approach for learning predictors over structured label spaces. This approach is computationally appealing as it replaces integration with optimization and allows gradients to be propagated in a deep net using loss-perturbed prediction. Recently, this technique was extended to generative models by introducing a randomized predictor that samples a structure from a randomly perturbed score function. In this work, we interpolate between these techniques by learning the variance of randomized structured predictors as well as their mean, in order to balance between the learned score function and the randomized noise. We demonstrate empirically the effectiveness of learning this balance in structured discrete spaces.}\n}", "pdf": "http://proceedings.mlr.press/v139/indelman21a/indelman21a.pdf", "supp": "", "pdf_size": 742770, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6521871878208082553&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Technion; Technion", "aff_domain": "campus.technion.ac.il; ", "email": "campus.technion.ac.il; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/indelman21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Technion - Israel Institute of Technology", "aff_unique_dep": "", "aff_unique_url": "https://www.technion.ac.il/en/", "aff_unique_abbr": "Technion", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "title": "Learning Representations by Humans, for Humans", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9071", "id": "9071", "proceeding": "http://proceedings.mlr.press/v139/hilgard21a.html", "slides": "", "author_site": "Sophie Hilgard, Nir Rosenfeld, Mahzarin Banaji, Jack Cao, David Parkes", "author": "Sophie Hilgard; Nir Rosenfeld; Mahzarin R Banaji; Jack Cao; David Parkes", "abstract": "When machine predictors can achieve higher performance than the human decision-makers they support, improving the performance of human decision-makers is often conflated with improving machine accuracy. Here we propose a framework to directly support human decision-making, in which the role of machines is to reframe problems rather than to prescribe actions through prediction. Inspired by the success of representation learning in improving performance of machine predictors, our framework learns human-facing representations optimized for human performance. This \u201cMind Composed with Machine\u201d framework incorporates a human decision-making model directly into the representation learning paradigm and is trained with a novel human-in-the-loop training procedure. 
We empirically demonstrate the successful application of the framework to various tasks and representational forms.", "bibtex": "@InProceedings{pmlr-v139-hilgard21a,\n title = \t {Learning Representations by Humans, for Humans},\n author = {Hilgard, Sophie and Rosenfeld, Nir and Banaji, Mahzarin R and Cao, Jack and Parkes, David},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4227--4238},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hilgard21a/hilgard21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hilgard21a.html},\n abstract = \t {When machine predictors can achieve higher performance than the human decision-makers they support, improving the performance of human decision-makers is often conflated with improving machine accuracy. Here we propose a framework to directly support human decision-making, in which the role of machines is to reframe problems rather than to prescribe actions through prediction. Inspired by the success of representation learning in improving performance of machine predictors, our framework learns human-facing representations optimized for human performance. This \u201cMind Composed with Machine\u201d framework incorporates a human decision-making model directly into the representation learning paradigm and is trained with a novel human-in-the-loop training procedure. We empirically demonstrate the successful application of the framework to various tasks and representational forms.}\n}", "pdf": "http://proceedings.mlr.press/v139/hilgard21a/hilgard21a.pdf", "supp": "", "pdf_size": 2005205, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5051358405309403520&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": ";;;;", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/hilgard21a.html" }, { "title": "Learning Routines for Effective Off-Policy Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9365", "id": "9365", "proceeding": "http://proceedings.mlr.press/v139/cetin21a.html", "slides": "", "author_site": "Edoardo Cetin, Oya Celiktutan", "author": "Edoardo Cetin; Oya Celiktutan", "abstract": "The performance of reinforcement learning depends upon designing an appropriate action space, where the effect of each action is measurable, yet granular enough to permit flexible behavior. So far, this process has involved non-trivial user choices in terms of the available actions and their execution frequency. We propose a novel framework for reinforcement learning that effectively lifts such constraints. Within our framework, agents learn effective behavior over a routine space: a new, higher-level action space, where each routine represents a set of \u2019equivalent\u2019 sequences of granular actions with arbitrary length. Our routine space is learned end-to-end to facilitate the accomplishment of underlying off-policy reinforcement learning objectives. 
We apply our framework to two state-of-the-art off-policy algorithms and show that the resulting agents obtain relevant performance improvements while requiring fewer interactions with the environment per episode, improving computational efficiency.", "bibtex": "@InProceedings{pmlr-v139-cetin21a,\n title = \t {Learning Routines for Effective Off-Policy Reinforcement Learning},\n author = {Cetin, Edoardo and Celiktutan, Oya},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1384--1394},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cetin21a/cetin21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/cetin21a.html},\n abstract = \t {The performance of reinforcement learning depends upon designing an appropriate action space, where the effect of each action is measurable, yet granular enough to permit flexible behavior. So far, this process has involved non-trivial user choices in terms of the available actions and their execution frequency. We propose a novel framework for reinforcement learning that effectively lifts such constraints. Within our framework, agents learn effective behavior over a routine space: a new, higher-level action space, where each routine represents a set of \u2019equivalent\u2019 sequences of granular actions with arbitrary length. Our routine space is learned end-to-end to facilitate the accomplishment of underlying off-policy reinforcement learning objectives. We apply our framework to two state-of-the-art off-policy algorithms and show that the resulting agents obtain relevant performance improvements while requiring fewer interactions with the environment per episode, improving computational efficiency.}\n}", "pdf": "http://proceedings.mlr.press/v139/cetin21a/cetin21a.pdf", "supp": "", "pdf_size": 773112, "gs_citation": 2, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8977380145851493236&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Centre for Robotics Research, Department of Engineering, King\u2019s College London; Centre for Robotics Research, Department of Engineering, King\u2019s College London", "aff_domain": "kcl.ac.uk; ", "email": "kcl.ac.uk; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/cetin21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "King\u2019s College London", "aff_unique_dep": "Department of Engineering", "aff_unique_url": "https://www.kcl.ac.uk", "aff_unique_abbr": "KCL", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "title": "Learning Self-Modulating Attention in Continuous Time Space with Applications to Sequential Recommendation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8989", "id": "8989", "proceeding": "http://proceedings.mlr.press/v139/chen21h.html", "slides": "", "author_site": "Chao Chen, Haoyu Geng, Nianzu Yang, Junchi Yan, Daiyue Xue, Jianping Yu, Xiaokang Yang", "author": "Chao Chen; Haoyu Geng; Nianzu Yang; Junchi Yan; Daiyue Xue; Jianping Yu; Xiaokang Yang", "abstract": "User interests are usually dynamic in the real world, which poses both theoretical and practical challenges for learning accurate preferences from rich behavior data. 
Among existing user behavior modeling solutions, attention networks are widely adopted for their effectiveness and relative simplicity. Despite being extensively studied, existing attentions still suffer from two limitations: i) conventional attentions mainly take into account the spatial correlation between user behaviors, regardless of the distance between those behaviors in the continuous time space; and ii) these attentions mostly provide a dense and undistinguished distribution over all past behaviors and then attentively encode them into the output latent representations. This is, however, not suitable in practical scenarios where a user\u2019s future actions are relevant to a small subset of her/his historical behaviors. In this paper, we propose a novel attention network, named \\textit{self-modulating attention}, that models the complex and non-linearly evolving dynamic user preferences. We empirically demonstrate the effectiveness of our method on top-N sequential recommendation tasks, and the results on three large-scale real-world datasets show that our model can achieve state-of-the-art performance.", "bibtex": "@InProceedings{pmlr-v139-chen21h,\n title = \t {Learning Self-Modulating Attention in Continuous Time Space with Applications to Sequential Recommendation},\n author = {Chen, Chao and Geng, Haoyu and Yang, Nianzu and Yan, Junchi and Xue, Daiyue and Yu, Jianping and Yang, Xiaokang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1606--1616},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21h/chen21h.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21h.html},\n abstract = \t {User interests are usually dynamic in the real world, which poses both theoretical and practical challenges for learning accurate preferences from rich behavior data. Among existing user behavior modeling solutions, attention networks are widely adopted for their effectiveness and relative simplicity. Despite being extensively studied, existing attentions still suffer from two limitations: i) conventional attentions mainly take into account the spatial correlation between user behaviors, regardless of the distance between those behaviors in the continuous time space; and ii) these attentions mostly provide a dense and undistinguished distribution over all past behaviors and then attentively encode them into the output latent representations. This is, however, not suitable in practical scenarios where a user\u2019s future actions are relevant to a small subset of her/his historical behaviors. In this paper, we propose a novel attention network, named \\textit{self-modulating attention}, that models the complex and non-linearly evolving dynamic user preferences. 
We empirically demonstrate the effectiveness of our method on top-N sequential recommendation tasks, and the results on three large-scale real-world datasets show that our model can achieve state-of-the-art performance.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21h/chen21h.pdf", "supp": "", "pdf_size": 406212, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16476778005591065966&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University, Shanghai, China+Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China; MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University, Shanghai, China+Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China; MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University, Shanghai, China+Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China; MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University, Shanghai, China+Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China; Meituan, Beijing, China; Meituan, Beijing, China; MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University, Shanghai, China+Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China", "aff_domain": "sjtu.edu.cn; ; ; ; ; ; ", "email": "sjtu.edu.cn; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/chen21h.html", "aff_unique_index": "0+0;0+0;0+0;0+0;1;1;0+0", "aff_unique_norm": "Shanghai Jiao Tong University;Meituan", "aff_unique_dep": "AI Institute;", "aff_unique_url": "https://www.sjtu.edu.cn;https://www.meituan.com", "aff_unique_abbr": "SJTU;Meituan", "aff_campus_unique_index": "0+0;0+0;0+0;0+0;1;1;0+0", "aff_campus_unique": "Shanghai;Beijing", "aff_country_unique_index": "0+0;0+0;0+0;0+0;0;0;0+0", "aff_country_unique": "China" }, { "title": "Learning Stochastic Behaviour from Aggregate Data", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8959", "id": "8959", "proceeding": "http://proceedings.mlr.press/v139/ma21c.html", "slides": "", "author_site": "Shaojun Ma, Shu Liu, Hongyuan Zha, Haomin Zhou", "author": "Shaojun Ma; Shu Liu; Hongyuan Zha; Haomin Zhou", "abstract": "Learning nonlinear dynamics from aggregate data is a challenging problem because the full trajectory of each individual is not available, namely, the individual observed at one time may not be observed at the next time point, or the identity of individual is unavailable. This is in sharp contrast to learning dynamics with full trajectory data, on which the majority of existing methods are based. We propose a novel method using the weak form of Fokker Planck Equation (FPE) \u2014 a partial differential equation \u2014 to describe the density evolution of data in a sampled form, which is then combined with Wasserstein generative adversarial network (WGAN) in the training process. In such a sample-based framework we are able to learn the nonlinear dynamics from aggregate data without explicitly solving the partial differential equation (PDE) FPE. 
We demonstrate our approach in the context of a series of synthetic and real-world data sets.", "bibtex": "@InProceedings{pmlr-v139-ma21c,\n title = \t {Learning Stochastic Behaviour from Aggregate Data},\n author = {Ma, Shaojun and Liu, Shu and Zha, Hongyuan and Zhou, Haomin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7258--7267},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ma21c/ma21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/ma21c.html},\n abstract = \t {Learning nonlinear dynamics from aggregate data is a challenging problem because the full trajectory of each individual is not available, namely, the individual observed at one time may not be observed at the next time point, or the identity of individual is unavailable. This is in sharp contrast to learning dynamics with full trajectory data, on which the majority of existing methods are based. We propose a novel method using the weak form of Fokker Planck Equation (FPE) \u2014 a partial differential equation \u2014 to describe the density evolution of data in a sampled form, which is then combined with Wasserstein generative adversarial network (WGAN) in the training process. In such a sample-based framework we are able to learn the nonlinear dynamics from aggregate data without explicitly solving the partial differential equation (PDE) FPE. We demonstrate our approach in the context of a series of synthetic and real-world data sets.}\n}", "pdf": "http://proceedings.mlr.press/v139/ma21c/ma21c.pdf", "supp": "", "pdf_size": 4182005, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17518511180735812807&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Mathematics, Georgia Institute of Technology; Department of Mathematics, Georgia Institute of Technology; School of Data Science, Shenzhen Research Institute of Big Data, The Chinese University of Hong Kong, Shenzhen; Department of Mathematics, Georgia Institute of Technology", "aff_domain": "gatech.edu; ; ; ", "email": "gatech.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/ma21c.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "Georgia Institute of Technology;Chinese University of Hong Kong, Shenzhen", "aff_unique_dep": "Department of Mathematics;School of Data Science", "aff_unique_url": "https://www.gatech.edu;https://www.szhk.edu.cn", "aff_unique_abbr": "Georgia Tech;CUHK(SZ)", "aff_campus_unique_index": "0;0;1;0", "aff_campus_unique": "Atlanta;Shenzhen", "aff_country_unique_index": "0;0;1;0", "aff_country_unique": "United States;China" }, { "title": "Learning Task Informed Abstractions", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9551", "id": "9551", "proceeding": "http://proceedings.mlr.press/v139/fu21b.html", "slides": "/media/icml-2021/Slides/9551.pdf", "author_site": "Xiang Fu, Ge Yang, Pulkit Agrawal, Tommi Jaakkola", "author": "Xiang Fu; Ge Yang; Pulkit Agrawal; Tommi Jaakkola", "abstract": "Current model-based reinforcement learning methods struggle when operating from complex visual scenes due to their inability to prioritize task-relevant features. 
To mitigate this problem, we propose learning Task Informed Abstractions (TIA) that explicitly separates reward-correlated visual features from distractors. For learning TIA, we introduce the formalism of Task Informed MDP (TiMDP) that is realized by training two models that learn visual features via cooperative reconstruction, but one model is adversarially dissociated from the reward signal. Empirical evaluation shows that TIA leads to significant performance gains over state-of-the-art methods on many visual control tasks where natural and unconstrained visual distractions pose a formidable challenge. Project page: https://xiangfu.co/tia", "bibtex": "@InProceedings{pmlr-v139-fu21b,\n title = \t {Learning Task Informed Abstractions},\n author = {Fu, Xiang and Yang, Ge and Agrawal, Pulkit and Jaakkola, Tommi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3480--3491},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fu21b/fu21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/fu21b.html},\n abstract = \t {Current model-based reinforcement learning methods struggle when operating from complex visual scenes due to their inability to prioritize task-relevant features. To mitigate this problem, we propose learning Task Informed Abstractions (TIA) that explicitly separates reward-correlated visual features from distractors. For learning TIA, we introduce the formalism of Task Informed MDP (TiMDP) that is realized by training two models that learn visual features via cooperative reconstruction, but one model is adversarially dissociated from the reward signal. Empirical evaluation shows that TIA leads to significant performance gains over state-of-the-art methods on many visual control tasks where natural and unconstrained visual distractions pose a formidable challenge. 
Project page: https://xiangfu.co/tia}\n}", "pdf": "http://proceedings.mlr.press/v139/fu21b/fu21b.pdf", "supp": "", "pdf_size": 3565006, "gs_citation": 79, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2332386988369186148&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "MIT CSAIL; MIT CSAIL + IAIFI; MIT CSAIL + IAIFI; MIT CSAIL", "aff_domain": "csail.mit.edu; ; ; ", "email": "csail.mit.edu; ; ; ", "github": "", "project": "https://xiangfu.co/tia", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/fu21b.html", "aff_unique_index": "0;0+1;0+1;0", "aff_unique_norm": "Massachusetts Institute of Technology;International Association for Artificial Intelligence and Informatics", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory;", "aff_unique_url": "https://www.csail.mit.edu;https://iaifi.org", "aff_unique_abbr": "MIT CSAIL;IAIFI", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Cambridge;", "aff_country_unique_index": "0;0+1;0+1;0", "aff_country_unique": "United States;Unknown" }, { "title": "Learning Transferable Visual Models From Natural Language Supervision", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9193", "id": "9193", "proceeding": "http://proceedings.mlr.press/v139/radford21a.html", "slides": "/media/icml-2021/Slides/9193.pdf", "author_site": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever", "author": "Alec Radford; Jong Wook Kim; Chris Hallacy; Aditya Ramesh; Gabriel Goh; Sandhini Agarwal; Girish Sastry; Amanda Askell; Pamela Mishkin; Jack Clark; Gretchen Krueger; Ilya Sutskever", "abstract": "State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. 
For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on.", "bibtex": "@InProceedings{pmlr-v139-radford21a,\n title = \t {Learning Transferable Visual Models From Natural Language Supervision},\n author = {Radford, Alec and Kim, Jong Wook and Hallacy, Chris and Ramesh, Aditya and Goh, Gabriel and Agarwal, Sandhini and Sastry, Girish and Askell, Amanda and Mishkin, Pamela and Clark, Jack and Krueger, Gretchen and Sutskever, Ilya},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8748--8763},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/radford21a/radford21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/radford21a.html},\n abstract = \t {State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. 
For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on.}\n}", "pdf": "http://proceedings.mlr.press/v139/radford21a/radford21a.pdf", "supp": "", "pdf_size": 3526772, "gs_citation": 35305, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15031020161691567042&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 22, "aff": "OpenAI; OpenAI; OpenAI; OpenAI; OpenAI; OpenAI; OpenAI; OpenAI; OpenAI; OpenAI; OpenAI; OpenAI", "aff_domain": "openai.com;openai.com; ; ; ; ; ; ; ; ; ;", "email": "openai.com;openai.com; ; ; ; ; ; ; ; ; ;", "github": "https://github.com/OpenAI/CLIP", "project": "", "author_num": 12, "oa": "https://proceedings.mlr.press/v139/radford21a.html", "aff_unique_index": "0;0;0;0;0;0;0;0;0;0;0;0", "aff_unique_norm": "OpenAI", "aff_unique_dep": "", "aff_unique_url": "https://openai.com", "aff_unique_abbr": "OpenAI", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Learning While Playing in Mean-Field Games: Convergence and Optimality", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9389", "id": "9389", "proceeding": "http://proceedings.mlr.press/v139/xie21g.html", "slides": "", "author_site": "Qiaomin Xie, Zhuoran Yang, Zhaoran Wang, Andreea Minca", "author": "Qiaomin Xie; Zhuoran Yang; Zhaoran Wang; Andreea Minca", "abstract": "We study reinforcement learning in mean-field games. To achieve the Nash equilibrium, which consists of a policy and a mean-field state, existing algorithms require obtaining the optimal policy while fixing any mean-field state. In practice, however, the policy and the mean-field state evolve simultaneously, as each agent is learning while playing. To bridge such a gap, we propose a fictitious play algorithm, which alternatively updates the policy (learning) and the mean-field state (playing) by one step of policy optimization and gradient descent, respectively. Despite the nonstationarity induced by such an alternating scheme, we prove that the proposed algorithm converges to the Nash equilibrium with an explicit convergence rate. To the best of our knowledge, it is the first provably efficient algorithm that achieves learning while playing via alternating updates.", "bibtex": "@InProceedings{pmlr-v139-xie21g,\n title = \t {Learning While Playing in Mean-Field Games: Convergence and Optimality},\n author = {Xie, Qiaomin and Yang, Zhuoran and Wang, Zhaoran and Minca, Andreea},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11436--11447},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xie21g/xie21g.pdf},\n url = \t {https://proceedings.mlr.press/v139/xie21g.html},\n abstract = \t {We study reinforcement learning in mean-field games. To achieve the Nash equilibrium, which consists of a policy and a mean-field state, existing algorithms require obtaining the optimal policy while fixing any mean-field state. In practice, however, the policy and the mean-field state evolve simultaneously, as each agent is learning while playing. 
To bridge such a gap, we propose a fictitious play algorithm, which alternatively updates the policy (learning) and the mean-field state (playing) by one step of policy optimization and gradient descent, respectively. Despite the nonstationarity induced by such an alternating scheme, we prove that the proposed algorithm converges to the Nash equilibrium with an explicit convergence rate. To the best of our knowledge, it is the first provably efficient algorithm that achieves learning while playing via alternating updates.}\n}", "pdf": "http://proceedings.mlr.press/v139/xie21g/xie21g.pdf", "supp": "", "pdf_size": 415814, "gs_citation": 53, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12008770931246665745&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "School of Operations Research and Information Engineering, Cornell University; Department of Operations Research and Financial Engineering, Princeton University; Department of Industrial Engineering and Management Sciences, Northwestern University; School of Operations Research and Information Engineering, Cornell University", "aff_domain": "cornell.edu; ; ; ", "email": "cornell.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/xie21g.html", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "Cornell University;Princeton University;Northwestern University", "aff_unique_dep": "School of Operations Research and Information Engineering;Department of Operations Research and Financial Engineering;Department of Industrial Engineering and Management Sciences", "aff_unique_url": "https://www.cornell.edu;https://www.princeton.edu;https://www.northwestern.edu", "aff_unique_abbr": "Cornell;Princeton;NU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Learning a Universal Template for Few-shot Dataset Generalization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10585", "id": "10585", "proceeding": "http://proceedings.mlr.press/v139/triantafillou21a.html", "slides": "/media/icml-2021/Slides/10585.pdf", "author_site": "Eleni Triantafillou, Hugo Larochelle, Richard Zemel, Vincent Dumoulin", "author": "Eleni Triantafillou; Hugo Larochelle; Richard Zemel; Vincent Dumoulin", "abstract": "Few-shot dataset generalization is a challenging variant of the well-studied few-shot classification problem where a diverse training set of several datasets is given, for the purpose of training an adaptable model that can then learn classes from \\emph{new datasets} using only a few examples. To this end, we propose to utilize the diverse training set to construct a \\emph{universal template}: a partial model that can define a wide array of dataset-specialized models, by plugging in appropriate components. For each new few-shot classification problem, our approach therefore only requires inferring a small number of parameters to insert into the universal template. We design a separate network that produces an initialization of those parameters for each given task, and we then fine-tune its proposed initialization via a few steps of gradient descent. 
Our approach is more parameter-efficient, scalable and adaptable compared to previous methods, and achieves the state-of-the-art on the challenging Meta-Dataset benchmark.", "bibtex": "@InProceedings{pmlr-v139-triantafillou21a,\n title = \t {Learning a Universal Template for Few-shot Dataset Generalization},\n author = {Triantafillou, Eleni and Larochelle, Hugo and Zemel, Richard and Dumoulin, Vincent},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10424--10433},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/triantafillou21a/triantafillou21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/triantafillou21a.html},\n abstract = \t {Few-shot dataset generalization is a challenging variant of the well-studied few-shot classification problem where a diverse training set of several datasets is given, for the purpose of training an adaptable model that can then learn classes from \\emph{new datasets} using only a few examples. To this end, we propose to utilize the diverse training set to construct a \\emph{universal template}: a partial model that can define a wide array of dataset-specialized models, by plugging in appropriate components. For each new few-shot classification problem, our approach therefore only requires inferring a small number of parameters to insert into the universal template. We design a separate network that produces an initialization of those parameters for each given task, and we then fine-tune its proposed initialization via a few steps of gradient descent. Our approach is more parameter-efficient, scalable and adaptable compared to previous methods, and achieves the state-of-the-art on the challenging Meta-Dataset benchmark.}\n}", "pdf": "http://proceedings.mlr.press/v139/triantafillou21a/triantafillou21a.pdf", "supp": "", "pdf_size": 660476, "gs_citation": 115, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1180369253723418240&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "University of Toronto, Vector Institute; Google Research, Brain Team; University of Toronto, Vector Institute; Google Research, Brain Team", "aff_domain": "cs.toronto.edu; ; ; ", "email": "cs.toronto.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/triantafillou21a.html", "aff_unique_index": "0;1;0;1", "aff_unique_norm": "University of Toronto;Google", "aff_unique_dep": ";Google Research", "aff_unique_url": "https://www.utoronto.ca;https://research.google", "aff_unique_abbr": "U of T;Google", "aff_campus_unique_index": "0;1;0;1", "aff_campus_unique": "Toronto;Mountain View", "aff_country_unique_index": "0;1;0;1", "aff_country_unique": "Canada;United States" }, { "title": "Learning and Planning in Average-Reward Markov Decision Processes", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10105", "id": "10105", "proceeding": "http://proceedings.mlr.press/v139/wan21a.html", "slides": "/media/icml-2021/Slides/10105.pdf", "author_site": "Yi Wan, Abhishek Naik, Richard Sutton", "author": "Yi Wan; Abhishek Naik; Richard S Sutton", "abstract": "We introduce learning and planning algorithms for average-reward MDPs, including 1) the first general proven-convergent off-policy model-free control algorithm without reference states, 2) the first 
proven-convergent off-policy model-free prediction algorithm, and 3) the first off-policy learning algorithm that converges to the actual value function rather than to the value function plus an offset. All of our algorithms are based on using the temporal-difference error rather than the conventional error when updating the estimate of the average reward. Our proof techniques are a slight generalization of those by Abounadi, Bertsekas, and Borkar (2001). In experiments with an Access-Control Queuing Task, we show some of the difficulties that can arise when using methods that rely on reference states and argue that our new algorithms are significantly easier to use.", "bibtex": "@InProceedings{pmlr-v139-wan21a,\n title = \t {Learning and Planning in Average-Reward Markov Decision Processes},\n author = {Wan, Yi and Naik, Abhishek and Sutton, Richard S},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10653--10662},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wan21a/wan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/wan21a.html},\n abstract = \t {We introduce learning and planning algorithms for average-reward MDPs, including 1) the first general proven-convergent off-policy model-free control algorithm without reference states, 2) the first proven-convergent off-policy model-free prediction algorithm, and 3) the first off-policy learning algorithm that converges to the actual value function rather than to the value function plus an offset. All of our algorithms are based on using the temporal-difference error rather than the conventional error when updating the estimate of the average reward. Our proof techniques are a slight generalization of those by Abounadi, Bertsekas, and Borkar (2001). 
In experiments with an Access-Control Queuing Task, we show some of the difficulties that can arise when using methods that rely on reference states and argue that our new algorithms are significantly easier to use.}\n}", "pdf": "http://proceedings.mlr.press/v139/wan21a/wan21a.pdf", "supp": "", "pdf_size": 2139548, "gs_citation": 93, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=750901868273869826&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "University of Alberta and Alberta Machine Intelligence Institute (Amii), Edmonton, Canada; University of Alberta and Alberta Machine Intelligence Institute (Amii), Edmonton, Canada; DeepMind", "aff_domain": "ualberta.ca;ualberta.ca; ", "email": "ualberta.ca;ualberta.ca; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/wan21a.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of Alberta;DeepMind", "aff_unique_dep": ";", "aff_unique_url": "https://www.ualberta.ca;https://deepmind.com", "aff_unique_abbr": "UAlberta;DeepMind", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Edmonton;", "aff_country_unique_index": "0;0;1", "aff_country_unique": "Canada;United Kingdom" }, { "title": "Learning and Planning in Complex Action Spaces", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10413", "id": "10413", "proceeding": "http://proceedings.mlr.press/v139/hubert21a.html", "slides": "", "author_site": "Thomas Hubert, Julian Schrittwieser, Ioannis Antonoglou, Mohammadamin Barekatain, Simon Schmitt, David Silver", "author": "Thomas Hubert; Julian Schrittwieser; Ioannis Antonoglou; Mohammadamin Barekatain; Simon Schmitt; David Silver", "abstract": "Many important real-world problems have action spaces that are high-dimensional, continuous or both, making full enumeration of all possible actions infeasible. Instead, only small subsets of actions can be sampled for the purpose of policy evaluation and improvement. In this paper, we propose a general framework to reason in a principled way about policy evaluation and improvement over such sampled action subsets. This sample-based policy iteration framework can in principle be applied to any reinforcement learning algorithm based upon policy iteration. Concretely, we propose Sampled MuZero, an extension of the MuZero algorithm that is able to learn in domains with arbitrarily complex action spaces by planning over sampled actions. We demonstrate this approach on the classical board game of Go and on two continuous control benchmark domains: DeepMind Control Suite and Real-World RL Suite.", "bibtex": "@InProceedings{pmlr-v139-hubert21a,\n title = \t {Learning and Planning in Complex Action Spaces},\n author = {Hubert, Thomas and Schrittwieser, Julian and Antonoglou, Ioannis and Barekatain, Mohammadamin and Schmitt, Simon and Silver, David},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4476--4486},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hubert21a/hubert21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hubert21a.html},\n abstract = \t {Many important real-world problems have action spaces that are high-dimensional, continuous or both, making full enumeration of all possible actions infeasible. 
Instead, only small subsets of actions can be sampled for the purpose of policy evaluation and improvement. In this paper, we propose a general framework to reason in a principled way about policy evaluation and improvement over such sampled action subsets. This sample-based policy iteration framework can in principle be applied to any reinforcement learning algorithm based upon policy iteration. Concretely, we propose Sampled MuZero, an extension of the MuZero algorithm that is able to learn in domains with arbitrarily complex action spaces by planning over sampled actions. We demonstrate this approach on the classical board game of Go and on two continuous control benchmark domains: DeepMind Control Suite and Real-World RL Suite.}\n}", "pdf": "http://proceedings.mlr.press/v139/hubert21a/hubert21a.pdf", "supp": "", "pdf_size": 1478092, "gs_citation": 108, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3146071618392752821&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK", "aff_domain": "google.com; ; ; ; ; ", "email": "google.com; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/hubert21a.html", "aff_unique_index": "0;0;0;0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "0;0;0;0;0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Learning by Turning: Neural Architecture Aware Optimisation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8853", "id": "8853", "proceeding": "http://proceedings.mlr.press/v139/liu21c.html", "slides": "", "author_site": "Yang Liu, Jeremy Bernstein, Markus Meister, Yisong Yue", "author": "Yang Liu; Jeremy Bernstein; Markus Meister; Yisong Yue", "abstract": "Descent methods for deep networks are notoriously capricious: they require careful tuning of step size, momentum and weight decay, and which method will work best on a new benchmark is a priori unclear. To address this problem, this paper conducts a combined study of neural architecture and optimisation, leading to a new optimiser called Nero: the neuronal rotator. Nero trains reliably without momentum or weight decay, works in situations where Adam and SGD fail, and requires little to no learning rate tuning. Also, Nero\u2019s memory footprint is \u00a0 square root that of Adam or LAMB. Nero combines two ideas: (1) projected gradient descent over the space of balanced networks; (2) neuron-specific updates, where the step size sets the angle through which each neuron\u2019s hyperplane turns. 
The paper concludes by discussing how this geometric connection between architecture and optimisation may impact theories of generalisation in deep learning.", "bibtex": "@InProceedings{pmlr-v139-liu21c,\n title = \t {Learning by Turning: Neural Architecture Aware Optimisation},\n author = {Liu, Yang and Bernstein, Jeremy and Meister, Markus and Yue, Yisong},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6748--6758},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21c/liu21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21c.html},\n abstract = \t {Descent methods for deep networks are notoriously capricious: they require careful tuning of step size, momentum and weight decay, and which method will work best on a new benchmark is a priori unclear. To address this problem, this paper conducts a combined study of neural architecture and optimisation, leading to a new optimiser called Nero: the neuronal rotator. Nero trains reliably without momentum or weight decay, works in situations where Adam and SGD fail, and requires little to no learning rate tuning. Also, Nero\u2019s memory footprint is \u00a0 square root that of Adam or LAMB. Nero combines two ideas: (1) projected gradient descent over the space of balanced networks; (2) neuron-specific updates, where the step size sets the angle through which each neuron\u2019s hyperplane turns. The paper concludes by discussing how this geometric connection between architecture and optimisation may impact theories of generalisation in deep learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21c/liu21c.pdf", "supp": "", "pdf_size": 2253744, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9218227008920600415&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Abacus.AI; Caltech; Caltech; Caltech", "aff_domain": "abacus.ai;caltech.edu; ; ", "email": "abacus.ai;caltech.edu; ; ", "github": "github.com/jxbz/nero", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/liu21c.html", "aff_unique_index": "0;1;1;1", "aff_unique_norm": "Abacus.AI;California Institute of Technology", "aff_unique_dep": ";", "aff_unique_url": "https://www.abacus.ai;https://www.caltech.edu", "aff_unique_abbr": "Abacus.AI;Caltech", "aff_campus_unique_index": "1;1;1", "aff_campus_unique": ";Pasadena", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Learning de-identified representations of prosody from raw audio", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8805", "id": "8805", "proceeding": "http://proceedings.mlr.press/v139/weston21a.html", "slides": "/media/icml-2021/Slides/8805.pdf", "author_site": "Jack Weston, Raphael Lenain, Udeepa Meepegama, Emil Fristed", "author": "Jack Weston; Raphael Lenain; Udeepa Meepegama; Emil Fristed", "abstract": "We propose a method for learning de-identified prosody representations from raw audio using a contrastive self-supervised signal. Whereas prior work has relied on conditioning models with bottlenecks, we introduce a set of inductive biases that exploit the natural structure of prosody to minimize timbral information and decouple prosody from speaker representations. 
Despite aggressive downsampling of the input and having no access to linguistic information, our model performs comparably to state-of-the-art speech representations on DAMMP, a new benchmark we introduce for spoken language understanding. We use minimum description length probing to show that our representations have selectively learned the subcomponents of non-timbral prosody, and that the product quantizer naturally disentangles them without using bottlenecks. We derive an information-theoretic definition of speech de-identifiability and use it to demonstrate that our prosody representations are less identifiable than the other speech representations.", "bibtex": "@InProceedings{pmlr-v139-weston21a,\n title = \t {Learning de-identified representations of prosody from raw audio},\n author = {Weston, Jack and Lenain, Raphael and Meepegama, Udeepa and Fristed, Emil},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11134--11145},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/weston21a/weston21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/weston21a.html},\n abstract = \t {We propose a method for learning de-identified prosody representations from raw audio using a contrastive self-supervised signal. Whereas prior work has relied on conditioning models with bottlenecks, we introduce a set of inductive biases that exploit the natural structure of prosody to minimize timbral information and decouple prosody from speaker representations. Despite aggressive downsampling of the input and having no access to linguistic information, our model performs comparably to state-of-the-art speech representations on DAMMP, a new benchmark we introduce for spoken language understanding. We use minimum description length probing to show that our representations have selectively learned the subcomponents of non-timbral prosody, and that the product quantizer naturally disentangles them without using bottlenecks. 
We derive an information-theoretic definition of speech de-identifiability and use it to demonstrate that our prosody representations are less identifiable than the other speech representations.}\n}", "pdf": "http://proceedings.mlr.press/v139/weston21a/weston21a.pdf", "supp": "", "pdf_size": 899335, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4580146081649113661&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Novoic, London, UK; Novoic, London, UK; Novoic, London, UK; Novoic, London, UK", "aff_domain": "novoic.com; ; ; ", "email": "novoic.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/weston21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Novoic", "aff_unique_dep": "", "aff_unique_url": "", "aff_unique_abbr": "", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Learning disentangled representations via product manifold projection", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9635", "id": "9635", "proceeding": "http://proceedings.mlr.press/v139/fumero21a.html", "slides": "", "author_site": "Marco Fumero, Luca Cosmo, Simone Melzi, Emanuele Rodola", "author": "Marco Fumero; Luca Cosmo; Simone Melzi; Emanuele Rodola", "abstract": "We propose a novel approach to disentangle the generative factors of variation underlying a given set of observations. Our method builds upon the idea that the (unknown) low-dimensional manifold underlying the data space can be explicitly modeled as a product of submanifolds. This definition of disentanglement gives rise to a novel weakly-supervised algorithm for recovering the unknown explanatory factors behind the data. At training time, our algorithm only requires pairs of non i.i.d. data samples whose elements share at least one, possibly multidimensional, generative factor of variation. We require no knowledge on the nature of these transformations, and do not make any limiting assumption on the properties of each subspace. Our approach is easy to implement, and can be successfully applied to different kinds of data (from images to 3D surfaces) undergoing arbitrary transformations. In addition to standard synthetic benchmarks, we showcase our method in challenging real-world applications, where we compare favorably with the state of the art.", "bibtex": "@InProceedings{pmlr-v139-fumero21a,\n title = \t {Learning disentangled representations via product manifold projection},\n author = {Fumero, Marco and Cosmo, Luca and Melzi, Simone and Rodola, Emanuele},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3530--3540},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fumero21a/fumero21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/fumero21a.html},\n abstract = \t {We propose a novel approach to disentangle the generative factors of variation underlying a given set of observations. Our method builds upon the idea that the (unknown) low-dimensional manifold underlying the data space can be explicitly modeled as a product of submanifolds. 
This definition of disentanglement gives rise to a novel weakly-supervised algorithm for recovering the unknown explanatory factors behind the data. At training time, our algorithm only requires pairs of non i.i.d. data samples whose elements share at least one, possibly multidimensional, generative factor of variation. We require no knowledge on the nature of these transformations, and do not make any limiting assumption on the properties of each subspace. Our approach is easy to implement, and can be successfully applied to different kinds of data (from images to 3D surfaces) undergoing arbitrary transformations. In addition to standard synthetic benchmarks, we showcase our method in challenging real-world applications, where we compare favorably with the state of the art.}\n}", "pdf": "http://proceedings.mlr.press/v139/fumero21a/fumero21a.pdf", "supp": "", "pdf_size": 3790261, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12396284301042335137&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Sapienza, University of Rome, Rome, Italy+Universit`a della Svizzera italiana, Lugano, Switzerland; Sapienza, University of Rome, Rome, Italy+Universit`a della Svizzera italiana, Lugano, Switzerland; Sapienza, University of Rome, Rome, Italy; Sapienza, University of Rome, Rome, Italy", "aff_domain": "di.uniroma1.it; ; ; ", "email": "di.uniroma1.it; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/fumero21a.html", "aff_unique_index": "0+1;0+1;0;0", "aff_unique_norm": "Sapienza University of Rome;Universit\u00e0 della Svizzera italiana", "aff_unique_dep": ";", "aff_unique_url": "https://www.uniroma1.it;https://www.usi.ch", "aff_unique_abbr": "Sapienza;USI", "aff_campus_unique_index": "0+1;0+1;0;0", "aff_campus_unique": "Rome;Lugano", "aff_country_unique_index": "0+1;0+1;0;0", "aff_country_unique": "Italy;Switzerland" }, { "title": "Learning from Biased Data: A Semi-Parametric Approach", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10043", "id": "10043", "proceeding": "http://proceedings.mlr.press/v139/bertail21a.html", "slides": "", "author_site": "Patrice Bertail, Stephan Cl\u00e9men\u00e7on, Yannick Guyonvarch, Nathan NOIRY", "author": "Patrice Bertail; Stephan Cl\u00e9men\u00e7on; Yannick Guyonvarch; Nathan Noiry", "abstract": "We consider risk minimization problems where the (source) distribution $P_S$ of the training observations $Z_1, \\ldots, Z_n$ differs from the (target) distribution $P_T$ involved in the risk that one seeks to minimize. Under the natural assumption that $P_S$ dominates $P_T$, \\textit{i.e.} $P_T< \\! 
\\!", "bibtex": "@InProceedings{pmlr-v139-bertail21a,\n title = \t {Learning from Biased Data: A Semi-Parametric Approach},\n author = {Bertail, Patrice and Cl{\\'e}men{\\c{c}}on, Stephan and Guyonvarch, Yannick and Noiry, Nathan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {803--812},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bertail21a/bertail21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bertail21a.html},\n abstract = \t {We consider risk minimization problems where the (source) distribution $P_S$ of the training observations $Z_1, \\ldots, Z_n$ differs from the (target) distribution $P_T$ involved in the risk that one seeks to minimize. Under the natural assumption that $P_S$ dominates $P_T$, \\textit{i.e.} $P_T< \\! \\!", "pdf": "http://proceedings.mlr.press/v139/bertail21a/bertail21a.pdf", "supp": "", "pdf_size": 1046440, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17963682311858696687&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Universit\u00e9 Paris-Nanterre, France; T\u00e9l\u00e9com Paris, France; T\u00e9l\u00e9com Paris, France; T\u00e9l\u00e9com Paris, France", "aff_domain": "parisnanterre.fr;telecom-paris.fr;telecom-paris.fr;telecom-paris.fr", "email": "parisnanterre.fr;telecom-paris.fr;telecom-paris.fr;telecom-paris.fr", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/bertail21a.html", "aff_unique_index": "0;1;1;1", "aff_unique_norm": "Universit\u00e9 Paris-Nanterre;T\u00e9l\u00e9com Paris", "aff_unique_dep": ";", "aff_unique_url": "https://www.univ-parisnanterre.fr;https://www.telecom-paris.fr", "aff_unique_abbr": "UPN;T\u00e9l\u00e9com Paris", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "France" }, { "title": "Learning from History for Byzantine Robust Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9987", "id": "9987", "proceeding": "http://proceedings.mlr.press/v139/karimireddy21a.html", "slides": "/media/icml-2021/Slides/9987.pdf", "author_site": "Sai Praneeth Reddy Karimireddy, Lie He, Martin Jaggi", "author": "Sai Praneeth Karimireddy; Lie He; Martin Jaggi", "abstract": "Byzantine robustness has received significant attention recently given its importance for distributed and federated learning. In spite of this, we identify severe flaws in existing algorithms even when the data across the participants is identically distributed. First, we show realistic examples where current state of the art robust aggregation rules fail to converge even in the absence of any Byzantine attackers. Secondly, we prove that even if the aggregation rules may succeed in limiting the influence of the attackers in a single round, the attackers can couple their attacks across time eventually leading to divergence. To address these issues, we present two surprisingly simple strategies: a new robust iterative clipping procedure, and incorporating worker momentum to overcome time-coupled attacks. 
This is the first provably robust method for the standard stochastic optimization setting.", "bibtex": "@InProceedings{pmlr-v139-karimireddy21a,\n title = \t {Learning from History for Byzantine Robust Optimization},\n author = {Karimireddy, Sai Praneeth and He, Lie and Jaggi, Martin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5311--5319},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/karimireddy21a/karimireddy21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/karimireddy21a.html},\n abstract = \t {Byzantine robustness has received significant attention recently given its importance for distributed and federated learning. In spite of this, we identify severe flaws in existing algorithms even when the data across the participants is identically distributed. First, we show realistic examples where current state of the art robust aggregation rules fail to converge even in the absence of any Byzantine attackers. Secondly, we prove that even if the aggregation rules may succeed in limiting the influence of the attackers in a single round, the attackers can couple their attacks across time eventually leading to divergence. To address these issues, we present two surprisingly simple strategies: a new robust iterative clipping procedure, and incorporating worker momentum to overcome time-coupled attacks. This is the first provably robust method for the standard stochastic optimization setting.}\n}", "pdf": "http://proceedings.mlr.press/v139/karimireddy21a/karimireddy21a.pdf", "supp": "", "pdf_size": 3107448, "gs_citation": 240, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3091706733962162017&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "EPFL, Switzerland; EPFL, Switzerland; EPFL, Switzerland", "aff_domain": "epfl.ch; ; ", "email": "epfl.ch; ; ", "github": "https://github.com/epfml/byzantine-robust-optimizer", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/karimireddy21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "EPFL", "aff_unique_dep": "", "aff_unique_url": "https://www.epfl.ch", "aff_unique_abbr": "EPFL", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Switzerland" }, { "title": "Learning from Nested Data with Ornstein Auto-Encoders", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9609", "id": "9609", "proceeding": "http://proceedings.mlr.press/v139/choi21a.html", "slides": "", "author_site": "Youngwon Choi, Sungdong Lee, Joong-Ho (Johann) Won", "author": "Youngwon Choi; Sungdong Lee; Joong-Ho Won", "abstract": "Many of real-world data, e.g., the VGGFace2 dataset, which is a collection of multiple portraits of individuals, come with nested structures due to grouped observation. The Ornstein auto-encoder (OAE) is an emerging framework for representation learning from nested data, based on an optimal transport distance between random processes. An attractive feature of OAE is its ability to generate new variations nested within an observational unit, whether or not the unit is known to the model. 
A previously proposed algorithm for OAE, termed the random-intercept OAE (RIOAE), showed an impressive performance in learning nested representations, yet lacks theoretical justification. In this work, we show that RIOAE minimizes a loose upper bound of the employed optimal transport distance. After identifying several issues with RIOAE, we present the product-space OAE (PSOAE) that minimizes a tighter upper bound of the distance and achieves orthogonality in the representation space. PSOAE alleviates the instability of RIOAE and provides more flexible representation of nested data. We demonstrate the high performance of PSOAE in the three key tasks of generative models: exemplar generation, style transfer, and new concept generation.", "bibtex": "@InProceedings{pmlr-v139-choi21a,\n title = \t {Learning from Nested Data with Ornstein Auto-Encoders},\n author = {Choi, Youngwon and Lee, Sungdong and Won, Joong-Ho},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1943--1952},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/choi21a/choi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/choi21a.html},\n abstract = \t {Many of real-world data, e.g., the VGGFace2 dataset, which is a collection of multiple portraits of individuals, come with nested structures due to grouped observation. The Ornstein auto-encoder (OAE) is an emerging framework for representation learning from nested data, based on an optimal transport distance between random processes. An attractive feature of OAE is its ability to generate new variations nested within an observational unit, whether or not the unit is known to the model. A previously proposed algorithm for OAE, termed the random-intercept OAE (RIOAE), showed an impressive performance in learning nested representations, yet lacks theoretical justification. In this work, we show that RIOAE minimizes a loose upper bound of the employed optimal transport distance. After identifying several issues with RIOAE, we present the product-space OAE (PSOAE) that minimizes a tighter upper bound of the distance and achieves orthogonality in the representation space. PSOAE alleviates the instability of RIOAE and provides more flexible representation of nested data. 
We demonstrate the high performance of PSOAE in the three key tasks of generative models: exemplar generation, style transfer, and new concept generation.}\n}", "pdf": "http://proceedings.mlr.press/v139/choi21a/choi21a.pdf", "supp": "", "pdf_size": 2243351, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=769202265064516075&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 3, "aff": "Department of Statistics, Seoul National University + UCLA Center for Vision & Imaging Biomarkers; Department of Statistics, Seoul National University; Department of Statistics, Seoul National University", "aff_domain": "stats.snu.ac.kr;stats.snu.ac.kr;stats.snu.ac.kr", "email": "stats.snu.ac.kr;stats.snu.ac.kr;stats.snu.ac.kr", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/choi21a.html", "aff_unique_index": "0+1;0;0", "aff_unique_norm": "Seoul National University;University of California, Los Angeles", "aff_unique_dep": "Department of Statistics;Center for Vision & Imaging Biomarkers", "aff_unique_url": "https://www.snu.ac.kr;https://www.ucla.edu", "aff_unique_abbr": "SNU;UCLA", "aff_campus_unique_index": "0+1;0;0", "aff_campus_unique": "Seoul;Los Angeles", "aff_country_unique_index": "0+1;0;0", "aff_country_unique": "South Korea;United States" }, { "title": "Learning from Noisy Labels with No Change to the Training Process", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9933", "id": "9933", "proceeding": "http://proceedings.mlr.press/v139/zhang21k.html", "slides": "", "author_site": "Mingyuan Zhang, Jane Lee, Shivani Agarwal", "author": "Mingyuan Zhang; Jane Lee; Shivani Agarwal", "abstract": "There has been much interest in recent years in developing learning algorithms that can learn accurate classifiers from data with noisy labels. A widely-studied noise model is that of \\emph{class-conditional noise} (CCN), wherein a label $y$ is flipped to a label $\\tilde{y}$ with some associated noise probability that depends on both $y$ and $\\tilde{y}$. In the multiclass setting, all previously proposed algorithms under the CCN model involve changing the training process, by introducing a \u2018noise-correction\u2019 to the surrogate loss to be minimized over the noisy training examples. In this paper, we show that this is really unnecessary: one can simply perform class probability estimation (CPE) on the noisy examples, e.g. using a standard (multiclass) logistic regression algorithm, and then apply noise-correction only in the final prediction step. This means that the training algorithm itself does not need any change, and one can simply use standard off-the-shelf implementations with no modification to the code for training. Our approach can handle general multiclass loss matrices, including the usual 0-1 loss but also other losses such as those used for ordinal regression problems. We also provide a quantitative regret transfer bound, which bounds the target regret on the true distribution in terms of the CPE regret on the noisy distribution; in doing so, we extend the notion of strong properness introduced for binary losses by Agarwal (2014) to the multiclass case. Our bound suggests that the sample complexity of learning under CCN increases as the noise matrix approaches singularity. We also provide fixes and potential improvements for noise estimation methods that involve computing anchor points. 
Our experiments confirm our theoretical findings.", "bibtex": "@InProceedings{pmlr-v139-zhang21k,\n title = \t {Learning from Noisy Labels with No Change to the Training Process},\n author = {Zhang, Mingyuan and Lee, Jane and Agarwal, Shivani},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12468--12478},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21k/zhang21k.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21k.html},\n abstract = \t {There has been much interest in recent years in developing learning algorithms that can learn accurate classifiers from data with noisy labels. A widely-studied noise model is that of \\emph{class-conditional noise} (CCN), wherein a label $y$ is flipped to a label $\\tilde{y}$ with some associated noise probability that depends on both $y$ and $\\tilde{y}$. In the multiclass setting, all previously proposed algorithms under the CCN model involve changing the training process, by introducing a \u2018noise-correction\u2019 to the surrogate loss to be minimized over the noisy training examples. In this paper, we show that this is really unnecessary: one can simply perform class probability estimation (CPE) on the noisy examples, e.g. using a standard (multiclass) logistic regression algorithm, and then apply noise-correction only in the final prediction step. This means that the training algorithm itself does not need any change, and one can simply use standard off-the-shelf implementations with no modification to the code for training. Our approach can handle general multiclass loss matrices, including the usual 0-1 loss but also other losses such as those used for ordinal regression problems. We also provide a quantitative regret transfer bound, which bounds the target regret on the true distribution in terms of the CPE regret on the noisy distribution; in doing so, we extend the notion of strong properness introduced for binary losses by Agarwal (2014) to the multiclass case. Our bound suggests that the sample complexity of learning under CCN increases as the noise matrix approaches singularity. We also provide fixes and potential improvements for noise estimation methods that involve computing anchor points. 
Our experiments confirm our theoretical findings.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21k/zhang21k.pdf", "supp": "", "pdf_size": 1465947, "gs_citation": 47, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10741393055241256389&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer and Information Science, University of Pennsylvania, Philadelphia, PA, USA+Twitter, San Francisco, CA, USA; Twitter, San Francisco, CA, USA; Department of Computer and Information Science, University of Pennsylvania, Philadelphia, PA, USA", "aff_domain": "seas.upenn.edu; ;seas.upenn.edu", "email": "seas.upenn.edu; ;seas.upenn.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zhang21k.html", "aff_unique_index": "0+1;1;0", "aff_unique_norm": "University of Pennsylvania;Twitter", "aff_unique_dep": "Department of Computer and Information Science;", "aff_unique_url": "https://www.upenn.edu;https://twitter.com", "aff_unique_abbr": "UPenn;Twitter", "aff_campus_unique_index": "0+1;1;0", "aff_campus_unique": "Philadelphia;San Francisco", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "United States" }, { "title": "Learning from Similarity-Confidence Data", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9869", "id": "9869", "proceeding": "http://proceedings.mlr.press/v139/cao21b.html", "slides": "", "author_site": "Yuzhou Cao, Lei Feng, Yitian Xu, Bo An, Gang Niu, Masashi Sugiyama", "author": "Yuzhou Cao; Lei Feng; Yitian Xu; Bo An; Gang Niu; Masashi Sugiyama", "abstract": "Weakly supervised learning has drawn considerable attention recently to reduce the expensive time and labor consumption of labeling massive data. In this paper, we investigate a novel weakly supervised learning problem of learning from similarity-confidence (Sconf) data, where only unlabeled data pairs equipped with confidence that illustrates their degree of similarity (two examples are similar if they belong to the same class) are needed for training a discriminative binary classifier. We propose an unbiased estimator of the classification risk that can be calculated from only Sconf data and show that the estimation error bound achieves the optimal convergence rate. To alleviate potential overfitting when flexible models are used, we further employ a risk correction scheme on the proposed risk estimator. Experimental results demonstrate the effectiveness of the proposed methods.", "bibtex": "@InProceedings{pmlr-v139-cao21b,\n title = \t {Learning from Similarity-Confidence Data},\n author = {Cao, Yuzhou and Feng, Lei and Xu, Yitian and An, Bo and Niu, Gang and Sugiyama, Masashi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1272--1282},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cao21b/cao21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/cao21b.html},\n abstract = \t {Weakly supervised learning has drawn considerable attention recently to reduce the expensive time and labor consumption of labeling massive data. 
In this paper, we investigate a novel weakly supervised learning problem of learning from similarity-confidence (Sconf) data, where only unlabeled data pairs equipped with confidence that illustrates their degree of similarity (two examples are similar if they belong to the same class) are needed for training a discriminative binary classifier. We propose an unbiased estimator of the classification risk that can be calculated from only Sconf data and show that the estimation error bound achieves the optimal convergence rate. To alleviate potential overfitting when flexible models are used, we further employ a risk correction scheme on the proposed risk estimator. Experimental results demonstrate the effectiveness of the proposed methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/cao21b/cao21b.pdf", "supp": "", "pdf_size": 4626418, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6301313838757321729&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "College of Science, China Agricultural University, Beijing, China; College of Computer Science, Chongqing University, Chongqing, China + RIKEN Center for Advanced Intelligence Project, Tokyo, Japan; College of Science, China Agricultural University, Beijing, China; Nanyang Technological University, School of Computer Science and Engineering, Singapore; RIKEN Center for Advanced Intelligence Project, Tokyo, Japan; RIKEN Center for Advanced Intelligence Project, Tokyo, Japan + The University of Tokyo, Tokyo, Japan", "aff_domain": "cau.edu.cn;swu.edu.cn;126.com;ntu.edu.sg;riken.jp;k.u-tokyo.ac.jp", "email": "cau.edu.cn;swu.edu.cn;126.com;ntu.edu.sg;riken.jp;k.u-tokyo.ac.jp", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/cao21b.html", "aff_unique_index": "0;1+2;0;3;2;2+4", "aff_unique_norm": "China Agricultural University;Chongqing University;RIKEN Center for Advanced Intelligence Project;Nanyang Technological University;University of Tokyo", "aff_unique_dep": "College of Science;College of Computer Science;Center for Advanced Intelligence Project;School of Computer Science and Engineering;", "aff_unique_url": "http://www.cau.edu.cn;http://en.cqu.edu.cn/;https://www.riken.jp/en/c-aip/;https://www.ntu.edu.sg;https://www.u-tokyo.ac.jp", "aff_unique_abbr": "CAU;CQU;RIKEN C-AIP;NTU;UTokyo", "aff_campus_unique_index": "0;1+2;0;2;2+2", "aff_campus_unique": "Beijing;Chongqing;Tokyo;", "aff_country_unique_index": "0;0+1;0;2;1;1+1", "aff_country_unique": "China;Japan;Singapore" }, { "title": "Learning in Nonzero-Sum Stochastic Games with Potentials", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10095", "id": "10095", "proceeding": "http://proceedings.mlr.press/v139/mguni21a.html", "slides": "", "author_site": "David Mguni, Yutong Wu, Yali Du, Yaodong Yang, Ziyi Wang, Minne Li, Ying Wen, Joel Jennings, Jun Wang", "author": "David H Mguni; Yutong Wu; Yali Du; Yaodong Yang; Ziyi Wang; Minne Li; Ying Wen; Joel Jennings; Jun Wang", "abstract": "Multi-agent reinforcement learning (MARL) has become effective in tackling discrete cooperative game scenarios. However, MARL has yet to penetrate settings beyond those modelled by team and zero-sum games, confining it to a small subset of multi-agent systems. In this paper, we introduce a new generation of MARL learners that can handle \\textit{nonzero-sum} payoff structures and continuous settings. 
In particular, we study the MARL problem in a class of games known as stochastic potential games (SPGs) with continuous state-action spaces. Unlike cooperative games, in which all agents share a common reward, SPGs are capable of modelling real-world scenarios where agents seek to fulfil their individual goals. We prove theoretically that our learning method enables independent agents to learn Nash equilibrium strategies in \\textit{polynomial time}. We demonstrate our framework tackles previously unsolvable tasks such as \\textit{Coordination Navigation} and \\textit{large selfish routing games} and that it outperforms the state of the art MARL baselines such as MADDPG and COMIX in such scenarios.", "bibtex": "@InProceedings{pmlr-v139-mguni21a,\n title = \t {Learning in Nonzero-Sum Stochastic Games with Potentials},\n author = {Mguni, David H and Wu, Yutong and Du, Yali and Yang, Yaodong and Wang, Ziyi and Li, Minne and Wen, Ying and Jennings, Joel and Wang, Jun},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7688--7699},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/mguni21a/mguni21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/mguni21a.html},\n abstract = \t {Multi-agent reinforcement learning (MARL) has become effective in tackling discrete cooperative game scenarios. However, MARL has yet to penetrate settings beyond those modelled by team and zero-sum games, confining it to a small subset of multi-agent systems. In this paper, we introduce a new generation of MARL learners that can handle \\textit{nonzero-sum} payoff structures and continuous settings. In particular, we study the MARL problem in a class of games known as stochastic potential games (SPGs) with continuous state-action spaces. Unlike cooperative games, in which all agents share a common reward, SPGs are capable of modelling real-world scenarios where agents seek to fulfil their individual goals. We prove theoretically that our learning method enables independent agents to learn Nash equilibrium strategies in \\textit{polynomial time}.
We demonstrate our framework tackles previously unsolvable tasks such as \\textit{Coordination Navigation} and \\textit{large selfish routing games} and that it outperforms the state of the art MARL baselines such as MADDPG and COMIX in such scenarios.}\n}", "pdf": "http://proceedings.mlr.press/v139/mguni21a/mguni21a.pdf", "supp": "", "pdf_size": 1732586, "gs_citation": 59, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1419980077614376128&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Huawei R&D UK; Institute of Automation, Chinese Academy of Sciences; University College London, UK; Huawei R&D UK+University College London, UK; Institute of Automation, Chinese Academy of Sciences; University College London, UK; Shanghai Jiao Tong University; Huawei R&D UK; University College London, UK", "aff_domain": "hotmail.com; ; ;outlook.com; ; ; ; ; ", "email": "hotmail.com; ; ;outlook.com; ; ; ; ; ", "github": "", "project": "", "author_num": 9, "oa": "https://proceedings.mlr.press/v139/mguni21a.html", "aff_unique_index": "0;1;2;0+2;1;2;3;0;2", "aff_unique_norm": "Huawei;Chinese Academy of Sciences;University College London;Shanghai Jiao Tong University", "aff_unique_dep": "R&D;Institute of Automation;;", "aff_unique_url": "https://www.huawei.com/uk;http://www.ia.cas.cn;https://www.ucl.ac.uk;https://www.sjtu.edu.cn", "aff_unique_abbr": "Huawei;CAS;UCL;SJTU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0;0+0;1;0;1;0;0", "aff_country_unique": "United Kingdom;China" }, { "title": "Learning to Generate Noise for Multi-Attack Robustness", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9867", "id": "9867", "proceeding": "http://proceedings.mlr.press/v139/madaan21a.html", "slides": "/media/icml-2021/Slides/9867.pdf", "author_site": "Divyam Madaan, Jinwoo Shin, Sung Ju Hwang", "author": "Divyam Madaan; Jinwoo Shin; Sung Ju Hwang", "abstract": "Adversarial learning has emerged as one of the successful techniques to circumvent the susceptibility of existing methods against adversarial perturbations. However, the majority of existing defense methods are tailored to defend against a single category of adversarial perturbation (e.g. $\\ell_\\infty$-attack). In safety-critical applications, this makes these methods extraneous as the attacker can adopt diverse adversaries to deceive the system. Moreover, training on multiple perturbations simultaneously significantly increases the computational overhead during training. To address these challenges, we propose a novel meta-learning framework that explicitly learns to generate noise to improve the model\u2019s robustness against multiple types of attacks. Its key component is \\emph{Meta Noise Generator (MNG)} that outputs optimal noise to stochastically perturb a given sample, such that it helps lower the error on diverse adversarial perturbations. By utilizing samples generated by MNG, we train a model by enforcing the label consistency across multiple perturbations. 
We validate the robustness of models trained by our scheme on various datasets and against a wide variety of perturbations, demonstrating that it significantly outperforms the baselines across multiple perturbations with a marginal computational cost.", "bibtex": "@InProceedings{pmlr-v139-madaan21a,\n title = \t {Learning to Generate Noise for Multi-Attack Robustness},\n author = {Madaan, Divyam and Shin, Jinwoo and Hwang, Sung Ju},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7279--7289},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/madaan21a/madaan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/madaan21a.html},\n abstract = \t {Adversarial learning has emerged as one of the successful techniques to circumvent the susceptibility of existing methods against adversarial perturbations. However, the majority of existing defense methods are tailored to defend against a single category of adversarial perturbation (e.g. $\\ell_\\infty$-attack). In safety-critical applications, this makes these methods extraneous as the attacker can adopt diverse adversaries to deceive the system. Moreover, training on multiple perturbations simultaneously significantly increases the computational overhead during training. To address these challenges, we propose a novel meta-learning framework that explicitly learns to generate noise to improve the model\u2019s robustness against multiple types of attacks. Its key component is \\emph{Meta Noise Generator (MNG)} that outputs optimal noise to stochastically perturb a given sample, such that it helps lower the error on diverse adversarial perturbations. By utilizing samples generated by MNG, we train a model by enforcing the label consistency across multiple perturbations. 
We validate the robustness of models trained by our scheme on various datasets and against a wide variety of perturbations, demonstrating that it significantly outperforms the baselines across multiple perturbations with a marginal computational cost.}\n}", "pdf": "http://proceedings.mlr.press/v139/madaan21a/madaan21a.pdf", "supp": "", "pdf_size": 2334434, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10029031126071377800&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "School of Computing, KAIST, South Korea+Graduate School of AI, KAIST, South Korea+AITRICS, South Korea; School of Electrical Engineering, KAIST, South Korea+Graduate School of AI, KAIST, South Korea; School of Computing, KAIST, South Korea+Graduate School of AI, KAIST, South Korea+AITRICS, South Korea", "aff_domain": "kaist.ac.kr; ; ", "email": "kaist.ac.kr; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/madaan21a.html", "aff_unique_index": "0+0+1;0+0;0+0+1", "aff_unique_norm": "KAIST;AITRICS", "aff_unique_dep": "School of Computing;", "aff_unique_url": "https://www.kaist.ac.kr;", "aff_unique_abbr": "KAIST;", "aff_campus_unique_index": ";;", "aff_campus_unique": "", "aff_country_unique_index": "0+0+0;0+0;0+0+0", "aff_country_unique": "South Korea" }, { "title": "Learning to Price Against a Moving Target", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9127", "id": "9127", "proceeding": "http://proceedings.mlr.press/v139/leme21a.html", "slides": "", "author_site": "Renato Leme, Balasubramanian Sivan, Yifeng Teng, Pratik Worah", "author": "Renato Paes Leme; Balasubramanian Sivan; Yifeng Teng; Pratik Worah", "abstract": "In the Learning to Price setting, a seller posts prices over time with the goal of maximizing revenue while learning the buyer\u2019s valuation. This problem is very well understood when values are stationary (fixed or iid). Here we study the problem where the buyer\u2019s value is a moving target, i.e., they change over time either by a stochastic process or adversarially with bounded variation. In either case, we provide matching upper and lower bounds on the optimal revenue loss. Since the target is moving, any information learned soon becomes out-dated, which forces the algorithms to keep switching between exploring and exploiting phases.", "bibtex": "@InProceedings{pmlr-v139-leme21a,\n title = \t {Learning to Price Against a Moving Target},\n author = {Leme, Renato Paes and Sivan, Balasubramanian and Teng, Yifeng and Worah, Pratik},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6223--6232},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/leme21a/leme21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/leme21a.html},\n abstract = \t {In the Learning to Price setting, a seller posts prices over time with the goal of maximizing revenue while learning the buyer\u2019s valuation. This problem is very well understood when values are stationary (fixed or iid). Here we study the problem where the buyer\u2019s value is a moving target, i.e., they change over time either by a stochastic process or adversarially with bounded variation. In either case, we provide matching upper and lower bounds on the optimal revenue loss. 
Since the target is moving, any information learned soon becomes out-dated, which forces the algorithms to keep switching between exploring and exploiting phases.}\n}", "pdf": "http://proceedings.mlr.press/v139/leme21a/leme21a.pdf", "supp": "", "pdf_size": 271051, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8690206335391581468&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Google Research, New York, NY, USA; Google Research, New York, NY, USA; Department of Computer Sciences, University of Wisconsin-Madison, Madison, WI, USA; Google Research, New York, NY, USA", "aff_domain": "google.com;google.com;cs.wisc.edu;google.com", "email": "google.com;google.com;cs.wisc.edu;google.com", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/leme21a.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "Google;University of Wisconsin-Madison", "aff_unique_dep": "Google Research;Department of Computer Sciences", "aff_unique_url": "https://research.google;https://www.wisc.edu", "aff_unique_abbr": "Google Research;UW-Madison", "aff_campus_unique_index": "0;0;1;0", "aff_campus_unique": "New York;Madison", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Learning to Rehearse in Long Sequence Memorization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9499", "id": "9499", "proceeding": "http://proceedings.mlr.press/v139/zhang21ac.html", "slides": "", "author_site": "Zhu Zhang, Chang Zhou, Jianxin Ma, Zhijie Lin, Jingren Zhou, Hongxia Yang, Zhou Zhao", "author": "Zhu Zhang; Chang Zhou; Jianxin Ma; Zhijie Lin; Jingren Zhou; Hongxia Yang; Zhou Zhao", "abstract": "Existing reasoning tasks often have an important assumption that the input contents can be always accessed while reasoning, requiring unlimited storage resources and suffering from severe time delay on long sequences. To achieve efficient reasoning on long sequences with limited storage resources, memory augmented neural networks introduce a human-like write-read memory to compress and memorize the long input sequence in one pass, trying to answer subsequent queries only based on the memory. But they have two serious drawbacks: 1) they continually update the memory from current information and inevitably forget the early contents; 2) they do not distinguish what information is important and treat all contents equally. In this paper, we propose the Rehearsal Memory (RM) to enhance long-sequence memorization by self-supervised rehearsal with a history sampler. To alleviate the gradual forgetting of early information, we design self-supervised rehearsal training with recollection and familiarity tasks. Further, we design a history sampler to select informative fragments for rehearsal training, making the memory focus on the crucial information. 
We evaluate the performance of our rehearsal memory by the synthetic bAbI task and several downstream tasks, including text/video question answering and recommendation on long sequences.", "bibtex": "@InProceedings{pmlr-v139-zhang21ac,\n title = \t {Learning to Rehearse in Long Sequence Memorization},\n author = {Zhang, Zhu and Zhou, Chang and Ma, Jianxin and Lin, Zhijie and Zhou, Jingren and Yang, Hongxia and Zhao, Zhou},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12663--12673},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21ac/zhang21ac.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21ac.html},\n abstract = \t {Existing reasoning tasks often have an important assumption that the input contents can be always accessed while reasoning, requiring unlimited storage resources and suffering from severe time delay on long sequences. To achieve efficient reasoning on long sequences with limited storage resources, memory augmented neural networks introduce a human-like write-read memory to compress and memorize the long input sequence in one pass, trying to answer subsequent queries only based on the memory. But they have two serious drawbacks: 1) they continually update the memory from current information and inevitably forget the early contents; 2) they do not distinguish what information is important and treat all contents equally. In this paper, we propose the Rehearsal Memory (RM) to enhance long-sequence memorization by self-supervised rehearsal with a history sampler. To alleviate the gradual forgetting of early information, we design self-supervised rehearsal training with recollection and familiarity tasks. Further, we design a history sampler to select informative fragments for rehearsal training, making the memory focus on the crucial information. 
We evaluate the performance of our rehearsal memory by the synthetic bAbI task and several downstream tasks, including text/video question answering and recommendation on long sequences.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21ac/zhang21ac.pdf", "supp": "", "pdf_size": 1830918, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7260848896026459016&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Zhejiang University, China+DAMO Academy, Alibaba Group, China; DAMO Academy, Alibaba Group, China; DAMO Academy, Alibaba Group, China; Zhejiang University, China; DAMO Academy, Alibaba Group, China; DAMO Academy, Alibaba Group, China; Zhejiang University, China", "aff_domain": "zju.edu.cn; ; ; ; ; ;zju.edu.cn", "email": "zju.edu.cn; ; ; ; ; ;zju.edu.cn", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/zhang21ac.html", "aff_unique_index": "0+1;1;1;0;1;1;0", "aff_unique_norm": "Zhejiang University;Alibaba Group", "aff_unique_dep": ";DAMO Academy", "aff_unique_url": "http://www.zju.edu.cn;https://www.alibaba.com", "aff_unique_abbr": "ZJU;Alibaba", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0;0;0;0;0", "aff_country_unique": "China" }, { "title": "Learning to Weight Imperfect Demonstrations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8833", "id": "8833", "proceeding": "http://proceedings.mlr.press/v139/wang21aa.html", "slides": "", "author_site": "Yunke Wang, Chang Xu, Bo Du, Honglak Lee", "author": "Yunke Wang; Chang Xu; Bo Du; Honglak Lee", "abstract": "This paper investigates how to weight imperfect expert demonstrations for generative adversarial imitation learning (GAIL). The agent is expected to perform behaviors demonstrated by experts. But in many applications, experts could also make mistakes and their demonstrations would mislead or slow the learning process of the agent. Recently, existing methods for imitation learning from imperfect demonstrations mostly focus on using the preference or confidence scores to distinguish imperfect demonstrations. However, these auxiliary information needs to be collected with the help of an oracle, which is usually hard and expensive to afford in practice. In contrast, this paper proposes a method of learning to weight imperfect demonstrations in GAIL without imposing extensive prior information. We provide a rigorous mathematical analysis, presenting that the weights of demonstrations can be exactly determined by combining the discriminator and agent policy in GAIL. Theoretical analysis suggests that with the estimated weights the agent can learn a better policy beyond those plain expert demonstrations. 
Experiments in the Mujoco and Atari environments demonstrate that the proposed algorithm outperforms baseline methods in handling imperfect expert demonstrations.", "bibtex": "@InProceedings{pmlr-v139-wang21aa,\n title = \t {Learning to Weight Imperfect Demonstrations},\n author = {Wang, Yunke and Xu, Chang and Du, Bo and Lee, Honglak},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10961--10970},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21aa/wang21aa.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21aa.html},\n abstract = \t {This paper investigates how to weight imperfect expert demonstrations for generative adversarial imitation learning (GAIL). The agent is expected to perform behaviors demonstrated by experts. But in many applications, experts could also make mistakes and their demonstrations would mislead or slow the learning process of the agent. Recently, existing methods for imitation learning from imperfect demonstrations mostly focus on using the preference or confidence scores to distinguish imperfect demonstrations. However, these auxiliary information needs to be collected with the help of an oracle, which is usually hard and expensive to afford in practice. In contrast, this paper proposes a method of learning to weight imperfect demonstrations in GAIL without imposing extensive prior information. We provide a rigorous mathematical analysis, presenting that the weights of demonstrations can be exactly determined by combining the discriminator and agent policy in GAIL. Theoretical analysis suggests that with the estimated weights the agent can learn a better policy beyond those plain expert demonstrations. 
Experiments in the Mujoco and Atari environments demonstrate that the proposed algorithm outperforms baseline methods in handling imperfect expert demonstrations.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21aa/wang21aa.pdf", "supp": "", "pdf_size": 4547564, "gs_citation": 57, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6552997381165765271&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "National Engineering Research Center for Multimedia Software, Institute of Artificial Intelligence, School of Computer Science and Hubei Key Laboratory of Multimedia and Network Communication Engineering, Wuhan University, China; School of Computer Science, Faculty of Engineering, The University of Sydney, Australia; EECS Department, University of Michigan, USA; LG AI Research, South Korea", "aff_domain": "whu.edu.cn;sydney.edu.au;whu.edu.cn;umich.edu", "email": "whu.edu.cn;sydney.edu.au;whu.edu.cn;umich.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/wang21aa.html", "aff_unique_index": "0;1;2;3", "aff_unique_norm": "Wuhan University;University of Sydney;University of Michigan;LG", "aff_unique_dep": "School of Computer Science;School of Computer Science;EECS Department;AI Research", "aff_unique_url": "http://www.whu.edu.cn;https://www.sydney.edu.au;https://www.umich.edu;https://www.lgaires.com", "aff_unique_abbr": "WHU;USYD;UM;LG AI Research", "aff_campus_unique_index": "0;2", "aff_campus_unique": "Wuhan;;Ann Arbor", "aff_country_unique_index": "0;1;2;3", "aff_country_unique": "China;Australia;United States;South Korea" }, { "title": "Lenient Regret and Good-Action Identification in Gaussian Process Bandits", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10637", "id": "10637", "proceeding": "http://proceedings.mlr.press/v139/cai21c.html", "slides": "", "author_site": "Xu Cai, Selwyn Gomes, Jonathan Scarlett", "author": "Xu Cai; Selwyn Gomes; Jonathan Scarlett", "abstract": "In this paper, we study the problem of Gaussian process (GP) bandits under relaxed optimization criteria stating that any function value above a certain threshold is \u201cgood enough\u201d. On the theoretical side, we study various {\\em lenient regret} notions in which all near-optimal actions incur zero penalty, and provide upper bounds on the lenient regret for GP-UCB and an elimination algorithm, circumventing the usual $O(\\sqrt{T})$ term (with time horizon $T$) resulting from zooming extremely close towards the function maximum. In addition, we complement these upper bounds with algorithm-independent lower bounds. On the practical side, we consider the problem of finding a single \u201cgood action\u201d according to a known pre-specified threshold, and introduce several good-action identification algorithms that exploit knowledge of the threshold. 
We experimentally find that such algorithms can typically find a good action faster than standard optimization-based approaches.", "bibtex": "@InProceedings{pmlr-v139-cai21c,\n title = \t {Lenient Regret and Good-Action Identification in Gaussian Process Bandits},\n author = {Cai, Xu and Gomes, Selwyn and Scarlett, Jonathan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1183--1192},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cai21c/cai21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/cai21c.html},\n abstract = \t {In this paper, we study the problem of Gaussian process (GP) bandits under relaxed optimization criteria stating that any function value above a certain threshold is \u201cgood enough\u201d. On the theoretical side, we study various {\\em lenient regret} notions in which all near-optimal actions incur zero penalty, and provide upper bounds on the lenient regret for GP-UCB and an elimination algorithm, circumventing the usual $O(\\sqrt{T})$ term (with time horizon $T$) resulting from zooming extremely close towards the function maximum. In addition, we complement these upper bounds with algorithm-independent lower bounds. On the practical side, we consider the problem of finding a single \u201cgood action\u201d according to a known pre-specified threshold, and introduce several good-action identification algorithms that exploit knowledge of the threshold. We experimentally find that such algorithms can typically find a good action faster than standard optimization-based approaches.}\n}", "pdf": "http://proceedings.mlr.press/v139/cai21c/cai21c.pdf", "supp": "", "pdf_size": 579376, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13998414945788250067&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science, National University of Singapore + Department of Mathematics & Institute of Data Science, National University of Singapore; Department of Computer Science, National University of Singapore + Department of Mathematics & Institute of Data Science, National University of Singapore; Department of Computer Science, National University of Singapore + Department of Mathematics & Institute of Data Science, National University of Singapore", "aff_domain": "u.nus.edu;comp.nus.edu.sg;comp.nus.edu.sg", "email": "u.nus.edu;comp.nus.edu.sg;comp.nus.edu.sg", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/cai21c.html", "aff_unique_index": "0+0;0+0;0+0", "aff_unique_norm": "National University of Singapore", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.nus.edu.sg", "aff_unique_abbr": "NUS", "aff_campus_unique_index": ";;", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0+0;0+0", "aff_country_unique": "Singapore" }, { "title": "Let\u2019s Agree to Degree: Comparing Graph Convolutional Networks in the Message-Passing Framework", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10439", "id": "10439", "proceeding": "http://proceedings.mlr.press/v139/geerts21a.html", "slides": "/media/icml-2021/Slides/10439_4ujJjsJ.pdf", "author_site": "Floris Geerts, Filip Mazowiecki, Guillermo Perez", "author": "Floris Geerts; Filip Mazowiecki; Guillermo Perez", 
"abstract": "In this paper we cast neural networks defined on graphs as message-passing neural networks (MPNNs) to study the distinguishing power of different classes of such models. We are interested in when certain architectures are able to tell vertices apart based on the feature labels given as input with the graph. We consider two variants of MPNNS: anonymous MPNNs whose message functions depend only on the labels of vertices involved; and degree-aware MPNNs whose message functions can additionally use information regarding the degree of vertices. The former class covers popular graph neural network (GNN) formalisms for which the distinguished power is known. The latter covers graph convolutional networks (GCNs), introduced by Kipf and Welling, for which the distinguishing power was unknown. We obtain lower and upper bounds on the distinguishing power of (anonymous and degree-aware) MPNNs in terms of the distinguishing power of the Weisfeiler-Lehman (WL) algorithm. Our main results imply that (i) the distinguishing power of GCNs is bounded by the WL algorithm, but they may be one step ahead; (ii) the WL algorithm cannot be simulated by \u201cplain vanilla\u201d GCNs but the addition of a trade-off parameter between features of the vertex and those of its neighbours (as proposed by Kipf and Welling) resolves this problem.", "bibtex": "@InProceedings{pmlr-v139-geerts21a,\n title = \t {Let\u2019s Agree to Degree: Comparing Graph Convolutional Networks in the Message-Passing Framework},\n author = {Geerts, Floris and Mazowiecki, Filip and Perez, Guillermo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3640--3649},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/geerts21a/geerts21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/geerts21a.html},\n abstract = \t {In this paper we cast neural networks defined on graphs as message-passing neural networks (MPNNs) to study the distinguishing power of different classes of such models. We are interested in when certain architectures are able to tell vertices apart based on the feature labels given as input with the graph. We consider two variants of MPNNS: anonymous MPNNs whose message functions depend only on the labels of vertices involved; and degree-aware MPNNs whose message functions can additionally use information regarding the degree of vertices. The former class covers popular graph neural network (GNN) formalisms for which the distinguished power is known. The latter covers graph convolutional networks (GCNs), introduced by Kipf and Welling, for which the distinguishing power was unknown. We obtain lower and upper bounds on the distinguishing power of (anonymous and degree-aware) MPNNs in terms of the distinguishing power of the Weisfeiler-Lehman (WL) algorithm. 
Our main results imply that (i) the distinguishing power of GCNs is bounded by the WL algorithm, but they may be one step ahead; (ii) the WL algorithm cannot be simulated by \u201cplain vanilla\u201d GCNs but the addition of a trade-off parameter between features of the vertex and those of its neighbours (as proposed by Kipf and Welling) resolves this problem.}\n}", "pdf": "http://proceedings.mlr.press/v139/geerts21a/geerts21a.pdf", "supp": "", "pdf_size": 320888, "gs_citation": 51, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1266626381321593219&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "University of Antwerp, Belgium; Max Planck Institute for Software Systems, Germany; Flanders Make, Belgium", "aff_domain": "uantwerpen.be; ; ", "email": "uantwerpen.be; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/geerts21a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "University of Antwerp;Max Planck Institute for Software Systems;Flanders Make", "aff_unique_dep": ";;", "aff_unique_url": "https://www.uantwerp.be;https://www.mpi-sws.org;https://www.flandersmake.be", "aff_unique_abbr": "UA;MPI-SWS;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0", "aff_country_unique": "Belgium;Germany" }, { "title": "Leveraged Weighted Loss for Partial Label Learning", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9687", "id": "9687", "proceeding": "http://proceedings.mlr.press/v139/wen21a.html", "slides": "/media/icml-2021/Slides/9687.pdf", "author_site": "Hongwei Wen, Jingyi Cui, Hanyuan Hang, Jiabin Liu, Yisen Wang, Zhouchen Lin", "author": "Hongwei Wen; Jingyi Cui; Hanyuan Hang; Jiabin Liu; Yisen Wang; Zhouchen Lin", "abstract": "As an important branch of weakly supervised learning, partial label learning deals with data where each instance is assigned with a set of candidate labels, whereas only one of them is true. Despite many methodology studies on learning from partial labels, there still lacks theoretical understandings of their risk consistent properties under relatively weak assumptions, especially on the link between theoretical results and the empirical choice of parameters. In this paper, we propose a family of loss functions named \\textit{Leveraged Weighted} (LW) loss, which for the first time introduces the leverage parameter $\\beta$ to consider the trade-off between losses on partial labels and non-partial ones. From the theoretical side, we derive a generalized result of risk consistency for the LW loss in learning from partial labels, based on which we provide guidance to the choice of the leverage parameter $\\beta$. 
In experiments, we verify the theoretical guidance, and show the high effectiveness of our proposed LW loss on both benchmark and real datasets compared with other state-of-the-art partial label learning algorithms.", "bibtex": "@InProceedings{pmlr-v139-wen21a,\n title = \t {Leveraged Weighted Loss for Partial Label Learning},\n author = {Wen, Hongwei and Cui, Jingyi and Hang, Hanyuan and Liu, Jiabin and Wang, Yisen and Lin, Zhouchen},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11091--11100},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wen21a/wen21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/wen21a.html},\n abstract = \t {As an important branch of weakly supervised learning, partial label learning deals with data where each instance is assigned with a set of candidate labels, whereas only one of them is true. Despite many methodology studies on learning from partial labels, there still lacks theoretical understandings of their risk consistent properties under relatively weak assumptions, especially on the link between theoretical results and the empirical choice of parameters. In this paper, we propose a family of loss functions named \\textit{Leveraged Weighted} (LW) loss, which for the first time introduces the leverage parameter $\\beta$ to consider the trade-off between losses on partial labels and non-partial ones. From the theoretical side, we derive a generalized result of risk consistency for the LW loss in learning from partial labels, based on which we provide guidance to the choice of the leverage parameter $\\beta$. In experiments, we verify the theoretical guidance, and show the high effectiveness of our proposed LW loss on both benchmark and real datasets compared with other state-of-the-art partial label learning algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v139/wen21a/wen21a.pdf", "supp": "", "pdf_size": 580320, "gs_citation": 130, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5461608260366903450&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 13, "aff": "Key Lab. of Machine Perception (MoE), School of EECS, Peking University, China+Department of Applied Mathematics, University of Twente, The Netherlands; Key Lab. of Machine Perception (MoE), School of EECS, Peking University, China+Department of Applied Mathematics, University of Twente, The Netherlands; Department of Applied Mathematics, University of Twente, The Netherlands; Samsung Research China-Beijing, Beijing, China; Key Lab. of Machine Perception (MoE), School of EECS, Peking University, China; Key Lab. 
of Machine Perception (MoE), School of EECS, Peking University, China+Pazhou Lab, Guangzhou, China", "aff_domain": "pku.edu.cn;pku.edu.cn;utwente.nl;samsung.com;pku.edu.cn;pku.edu.cn", "email": "pku.edu.cn;pku.edu.cn;utwente.nl;samsung.com;pku.edu.cn;pku.edu.cn", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/wen21a.html", "aff_unique_index": "0+1;0+1;1;2;0;0+3", "aff_unique_norm": "Peking University;University of Twente;Samsung;Pazhou Lab", "aff_unique_dep": "School of EECS;Department of Applied Mathematics;Samsung Research China;", "aff_unique_url": "http://www.pku.edu.cn;https://www.utwente.nl;https://www.samsung.com/cn;", "aff_unique_abbr": "Peking U;;SRC;", "aff_campus_unique_index": ";;1;2", "aff_campus_unique": ";Beijing;Guangzhou", "aff_country_unique_index": "0+1;0+1;1;0;0;0+0", "aff_country_unique": "China;Netherlands" }, { "title": "Leveraging Good Representations in Linear Contextual Bandits", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9267", "id": "9267", "proceeding": "http://proceedings.mlr.press/v139/papini21a.html", "slides": "/media/icml-2021/Slides/9267.pdf", "author_site": "Matteo Papini, Andrea Tirinzoni, Marcello Restelli, Alessandro Lazaric, Matteo Pirotta", "author": "Matteo Papini; Andrea Tirinzoni; Marcello Restelli; Alessandro Lazaric; Matteo Pirotta", "abstract": "The linear contextual bandit literature is mostly focused on the design of efficient learning algorithms for a given representation. However, a contextual bandit problem may admit multiple linear representations, each one with different characteristics that directly impact the regret of the learning algorithm. In particular, recent works showed that there exist \u201cgood\u201d representations for which constant problem-dependent regret can be achieved. In this paper, we first provide a systematic analysis of the different definitions of \u201cgood\u201d representations proposed in the literature. We then propose a novel selection algorithm able to adapt to the best representation in a set of $M$ candidates. We show that the regret is indeed never worse than the regret obtained by running \\textsc{LinUCB} on best representation (up to a $\\ln M$ factor). As a result, our algorithm achieves constant regret if a \u201cgood\u201d representation is available in the set. Furthermore, we show the algorithm may still achieve constant regret by implicitly constructing a \u201cgood\u201d representation, even when none of the initial representations is \u201cgood\u201d. Finally, we validate our theoretical findings in a number of standard contextual bandit problems.", "bibtex": "@InProceedings{pmlr-v139-papini21a,\n title = \t {Leveraging Good Representations in Linear Contextual Bandits},\n author = {Papini, Matteo and Tirinzoni, Andrea and Restelli, Marcello and Lazaric, Alessandro and Pirotta, Matteo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8371--8380},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/papini21a/papini21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/papini21a.html},\n abstract = \t {The linear contextual bandit literature is mostly focused on the design of efficient learning algorithms for a given representation. 
However, a contextual bandit problem may admit multiple linear representations, each one with different characteristics that directly impact the regret of the learning algorithm. In particular, recent works showed that there exist \u201cgood\u201d representations for which constant problem-dependent regret can be achieved. In this paper, we first provide a systematic analysis of the different definitions of \u201cgood\u201d representations proposed in the literature. We then propose a novel selection algorithm able to adapt to the best representation in a set of $M$ candidates. We show that the regret is indeed never worse than the regret obtained by running \\textsc{LinUCB} on best representation (up to a $\\ln M$ factor). As a result, our algorithm achieves constant regret if a \u201cgood\u201d representation is available in the set. Furthermore, we show the algorithm may still achieve constant regret by implicitly constructing a \u201cgood\u201d representation, even when none of the initial representations is \u201cgood\u201d. Finally, we validate our theoretical findings in a number of standard contextual bandit problems.}\n}", "pdf": "http://proceedings.mlr.press/v139/papini21a/papini21a.pdf", "supp": "", "pdf_size": 583912, "gs_citation": 35, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17497583548748472397&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Politecnico di Milano, Milan, Italy+Facebook AI Research; Politecnico di Milano, Milan, Italy; Politecnico di Milano, Milan, Italy; Facebook AI Research, Paris, France; Facebook AI Research, Paris, France", "aff_domain": "polimi.it; ; ; ; ", "email": "polimi.it; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/papini21a.html", "aff_unique_index": "0+1;0;0;1;1", "aff_unique_norm": "Politecnico di Milano;Meta", "aff_unique_dep": ";Facebook AI Research", "aff_unique_url": "https://www.polimi.it;https://research.facebook.com", "aff_unique_abbr": "Polimi;FAIR", "aff_campus_unique_index": "0;0;0;2;2", "aff_campus_unique": "Milan;;Paris", "aff_country_unique_index": "0+1;0;0;2;2", "aff_country_unique": "Italy;United States;France" }, { "title": "Leveraging Language to Learn Program Abstractions and Search Heuristics", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10371", "id": "10371", "proceeding": "http://proceedings.mlr.press/v139/wong21a.html", "slides": "", "author_site": "Catherine Wong, Kevin Ellis, Josh Tenenbaum, Jacob Andreas", "author": "Catherine Wong; Kevin M Ellis; Joshua Tenenbaum; Jacob Andreas", "abstract": "Inductive program synthesis, or inferring programs from examples of desired behavior, offers a general paradigm for building interpretable, robust, and generalizable machine learning systems. Effective program synthesis depends on two key ingredients: a strong library of functions from which to build programs, and an efficient search strategy for finding programs that solve a given task. We introduce LAPS (Language for Abstraction and Program Search), a technique for using natural language annotations to guide joint learning of libraries and neurally-guided search models for synthesis.
When integrated into a state-of-the-art library learning system (DreamCoder), LAPS produces higher-quality libraries and improves search efficiency and generalization on three domains {\u2013} string editing, image composition, and abstract reasoning about scenes {\u2013} even when no natural language hints are available at test time.", "bibtex": "@InProceedings{pmlr-v139-wong21a,\n title = \t {Leveraging Language to Learn Program Abstractions and Search Heuristics},\n author = {Wong, Catherine and Ellis, Kevin M and Tenenbaum, Joshua and Andreas, Jacob},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11193--11204},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wong21a/wong21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/wong21a.html},\n abstract = \t {Inductive program synthesis, or inferring programs from examples of desired behavior, offers a general paradigm for building interpretable, robust, and generalizable machine learning systems. Effective program synthesis depends on two key ingredients: a strong library of functions from which to build programs, and an efficient search strategy for finding programs that solve a given task. We introduce LAPS (Language for Abstraction and Program Search), a technique for using natural language annotations to guide joint learning of libraries and neurally-guided search models for synthesis. When integrated into a state-of-the-art library learning system (DreamCoder), LAPS produces higher-quality libraries and improves search efficiency and generalization on three domains {\u2013} string editing, image composition, and abstract reasoning about scenes {\u2013} even when no natural language hints are available at test time.}\n}", "pdf": "http://proceedings.mlr.press/v139/wong21a/wong21a.pdf", "supp": "", "pdf_size": 2912430, "gs_citation": 69, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9695724782277813172&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "MIT; Cornell University; MIT+Center for Brains, Minds and Machines (CBMM) - MIT; MIT", "aff_domain": "mit.edu; ; ; ", "email": "mit.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/wong21a.html", "aff_unique_index": "0;1;0+0;0", "aff_unique_norm": "Massachusetts Institute of Technology;Cornell University", "aff_unique_dep": ";", "aff_unique_url": "https://web.mit.edu;https://www.cornell.edu", "aff_unique_abbr": "MIT;Cornell", "aff_campus_unique_index": "1", "aff_campus_unique": ";Cambridge", "aff_country_unique_index": "0;0;0+0;0", "aff_country_unique": "United States" }, { "title": "Leveraging Non-uniformity in First-order Non-convex Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9457", "id": "9457", "proceeding": "http://proceedings.mlr.press/v139/mei21a.html", "slides": "/media/icml-2021/Slides/9457.pdf", "author_site": "Jincheng Mei, Yue Gao, Bo Dai, Csaba Szepesvari, Dale Schuurmans", "author": "Jincheng Mei; Yue Gao; Bo Dai; Csaba Szepesvari; Dale Schuurmans", "abstract": "Classical global convergence results for first-order methods rely on uniform smoothness and the \u0141{}ojasiewicz inequality.
Motivated by properties of objective functions that arise in machine learning, we propose a non-uniform refinement of these notions, leading to \\emph{Non-uniform Smoothness} (NS) and \\emph{Non-uniform \u0141{}ojasiewicz inequality} (N\u0141{}). The new definitions inspire new geometry-aware first-order methods that are able to converge to global optimality faster than the classical $\\Omega(1/t^2)$ lower bounds. To illustrate the power of these geometry-aware methods and their corresponding non-uniform analysis, we consider two important problems in machine learning: policy gradient optimization in reinforcement learning (PG), and generalized linear model training in supervised learning (GLM). For PG, we find that normalizing the gradient ascent method can accelerate convergence to $O(e^{- c \\cdot t})$ (where $c > 0$) while incurring less overhead than existing algorithms. For GLM, we show that geometry-aware normalized gradient descent can also achieve a linear convergence rate, which significantly improves the best known results. We additionally show that the proposed geometry-aware gradient descent methods escape landscape plateaus faster than standard gradient descent. Experimental results are used to illustrate and complement the theoretical findings.", "bibtex": "@InProceedings{pmlr-v139-mei21a,\n title = \t {Leveraging Non-uniformity in First-order Non-convex Optimization},\n author = {Mei, Jincheng and Gao, Yue and Dai, Bo and Szepesvari, Csaba and Schuurmans, Dale},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7555--7564},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/mei21a/mei21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/mei21a.html},\n abstract = \t {Classical global convergence results for first-order methods rely on uniform smoothness and the \u0141{}ojasiewicz inequality. Motivated by properties of objective functions that arise in machine learning, we propose a non-uniform refinement of these notions, leading to \\emph{Non-uniform Smoothness} (NS) and \\emph{Non-uniform \u0141{}ojasiewicz inequality} (N\u0141{}). The new definitions inspire new geometry-aware first-order methods that are able to converge to global optimality faster than the classical $\\Omega(1/t^2)$ lower bounds. To illustrate the power of these geometry-aware methods and their corresponding non-uniform analysis, we consider two important problems in machine learning: policy gradient optimization in reinforcement learning (PG), and generalized linear model training in supervised learning (GLM). For PG, we find that normalizing the gradient ascent method can accelerate convergence to $O(e^{- c \\cdot t})$ (where $c > 0$) while incurring less overhead than existing algorithms. For GLM, we show that geometry-aware normalized gradient descent can also achieve a linear convergence rate, which significantly improves the best known results. We additionally show that the proposed geometry-aware gradient descent methods escape landscape plateaus faster than standard gradient descent. 
Experimental results are used to illustrate and complement the theoretical findings.}\n}", "pdf": "http://proceedings.mlr.press/v139/mei21a/mei21a.pdf", "supp": "", "pdf_size": 1124893, "gs_citation": 77, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9574115275726979230&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 9, "aff": "University of Alberta; University of Alberta; Google Research, Brain Team; DeepMind; University of Alberta + Google Research, Brain Team", "aff_domain": "ualberta.ca;ualberta.ca; ; ; ", "email": "ualberta.ca;ualberta.ca; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/mei21a.html", "aff_unique_index": "0;0;1;2;0+1", "aff_unique_norm": "University of Alberta;Google;DeepMind", "aff_unique_dep": ";Google Research;", "aff_unique_url": "https://www.ualberta.ca;https://research.google;https://deepmind.com", "aff_unique_abbr": "UAlberta;Google;DeepMind", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Mountain View", "aff_country_unique_index": "0;0;1;2;0+1", "aff_country_unique": "Canada;United States;United Kingdom" }, { "title": "Leveraging Public Data for Practical Private Query Release", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9383", "id": "9383", "proceeding": "http://proceedings.mlr.press/v139/liu21w.html", "slides": "", "author_site": "Terrance Liu, Giuseppe Vietri, Thomas Steinke, Jonathan Ullman, Steven Wu", "author": "Terrance Liu; Giuseppe Vietri; Thomas Steinke; Jonathan Ullman; Steven Wu", "abstract": "In many statistical problems, incorporating priors can significantly improve performance. However, the use of prior knowledge in differentially private query release has remained underexplored, despite such priors commonly being available in the form of public datasets, such as previous US Census releases. With the goal of releasing statistics about a private dataset, we present PMW^Pub, which\u2014unlike existing baselines\u2014leverages public data drawn from a related distribution as prior information. We provide a theoretical analysis and an empirical evaluation on the American Community Survey (ACS) and ADULT datasets, which shows that our method outperforms state-of-the-art methods. Furthermore, PMW^Pub scales well to high-dimensional data domains, where running many existing methods would be computationally infeasible.", "bibtex": "@InProceedings{pmlr-v139-liu21w,\n title = \t {Leveraging Public Data for Practical Private Query Release},\n author = {Liu, Terrance and Vietri, Giuseppe and Steinke, Thomas and Ullman, Jonathan and Wu, Steven},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6968--6977},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21w/liu21w.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21w.html},\n abstract = \t {In many statistical problems, incorporating priors can significantly improve performance. However, the use of prior knowledge in differentially private query release has remained underexplored, despite such priors commonly being available in the form of public datasets, such as previous US Census releases. 
With the goal of releasing statistics about a private dataset, we present PMW^Pub, which\u2014unlike existing baselines\u2014leverages public data drawn from a related distribution as prior information. We provide a theoretical analysis and an empirical evaluation on the American Community Survey (ACS) and ADULT datasets, which shows that our method outperforms state-of-the-art methods. Furthermore, PMW^Pub scales well to high-dimensional data domains, where running many existing methods would be computationally infeasible.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21w/liu21w.pdf", "supp": "", "pdf_size": 1508106, "gs_citation": 76, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10819180564771632569&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Carnegie Mellon University, Pittsburgh, PA, USA; University of Minnesota, Minneapolis, MN, USA; Google, Mountain View, CA, USA; Northeastern University, Boston, MA, USA; Carnegie Mellon University, Pittsburgh, PA, USA", "aff_domain": "cmu.edu; ; ; ;cmu.edu", "email": "cmu.edu; ; ; ;cmu.edu", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/liu21w.html", "aff_unique_index": "0;1;2;3;0", "aff_unique_norm": "Carnegie Mellon University;University of Minnesota;Google;Northeastern University", "aff_unique_dep": ";;Google;", "aff_unique_url": "https://www.cmu.edu;https://www.minnesota.edu;https://www.google.com;https://www.northeastern.edu", "aff_unique_abbr": "CMU;UMN;Google;NEU", "aff_campus_unique_index": "0;1;2;3;0", "aff_campus_unique": "Pittsburgh;Minneapolis;Mountain View;Boston", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Leveraging Sparse Linear Layers for Debuggable Deep Networks", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10529", "id": "10529", "proceeding": "http://proceedings.mlr.press/v139/wong21b.html", "slides": "", "author_site": "Eric Wong, Shibani Santurkar, Aleksander Madry", "author": "Eric Wong; Shibani Santurkar; Aleksander Madry", "abstract": "We show how fitting sparse linear models over learned deep feature representations can lead to more debuggable neural networks. These networks remain highly accurate while also being more amenable to human interpretation, as we demonstrate quantitatively and via human experiments. We further illustrate how the resulting sparse explanations can help to identify spurious correlations, explain misclassifications, and diagnose model biases in vision and language tasks.", "bibtex": "@InProceedings{pmlr-v139-wong21b,\n title = \t {Leveraging Sparse Linear Layers for Debuggable Deep Networks},\n author = {Wong, Eric and Santurkar, Shibani and Madry, Aleksander},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11205--11216},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wong21b/wong21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/wong21b.html},\n abstract = \t {We show how fitting sparse linear models over learned deep feature representations can lead to more debuggable neural networks. These networks remain highly accurate while also being more amenable to human interpretation, as we demonstrate quantitatively and via human experiments. 
We further illustrate how the resulting sparse explanations can help to identify spurious correlations, explain misclassifications, and diagnose model biases in vision and language tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/wong21b/wong21b.pdf", "supp": "", "pdf_size": 6651652, "gs_citation": 96, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7540826532948114130&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Massachusetts Institute of Technology; Massachusetts Institute of Technology; Massachusetts Institute of Technology", "aff_domain": "mit.edu;mit.edu; ", "email": "mit.edu;mit.edu; ", "github": "https://github.com/madrylab/debuggabledeepnetworks", "project": "https://github.com/madrylab/glm_saga", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/wong21b.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "", "aff_unique_url": "https://web.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "LieTransformer: Equivariant Self-Attention for Lie Groups", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8407", "id": "8407", "proceeding": "http://proceedings.mlr.press/v139/hutchinson21a.html", "slides": "", "author_site": "Michael Hutchinson, Charline Le Lan, Sheheryar Zaidi, Emilien Dupont, Yee-Whye Teh, Hyunjik Kim", "author": "Michael J Hutchinson; Charline Le Lan; Sheheryar Zaidi; Emilien Dupont; Yee Whye Teh; Hyunjik Kim", "abstract": "Group equivariant neural networks are used as building blocks of group invariant neural networks, which have been shown to improve generalisation performance and data efficiency through principled parameter sharing. Such works have mostly focused on group equivariant convolutions, building on the result that group equivariant linear maps are necessarily convolutions. In this work, we extend the scope of the literature to self-attention, that is emerging as a prominent building block of deep learning models. We propose the LieTransformer, an architecture composed of LieSelfAttention layers that are equivariant to arbitrary Lie groups and their discrete subgroups. We demonstrate the generality of our approach by showing experimental results that are competitive to baseline methods on a wide range of tasks: shape counting on point clouds, molecular property regression and modelling particle trajectories under Hamiltonian dynamics.", "bibtex": "@InProceedings{pmlr-v139-hutchinson21a,\n title = \t {LieTransformer: Equivariant Self-Attention for Lie Groups},\n author = {Hutchinson, Michael J and Lan, Charline Le and Zaidi, Sheheryar and Dupont, Emilien and Teh, Yee Whye and Kim, Hyunjik},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4533--4543},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hutchinson21a/hutchinson21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hutchinson21a.html},\n abstract = \t {Group equivariant neural networks are used as building blocks of group invariant neural networks, which have been shown to improve generalisation performance and data efficiency through principled parameter sharing. 
Such works have mostly focused on group equivariant convolutions, building on the result that group equivariant linear maps are necessarily convolutions. In this work, we extend the scope of the literature to self-attention, that is emerging as a prominent building block of deep learning models. We propose the LieTransformer, an architecture composed of LieSelfAttention layers that are equivariant to arbitrary Lie groups and their discrete subgroups. We demonstrate the generality of our approach by showing experimental results that are competitive to baseline methods on a wide range of tasks: shape counting on point clouds, molecular property regression and modelling particle trajectories under Hamiltonian dynamics.}\n}", "pdf": "http://proceedings.mlr.press/v139/hutchinson21a/hutchinson21a.pdf", "supp": "", "pdf_size": 793751, "gs_citation": 71, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2610009358606993712&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": ";;;;;", "aff_domain": ";;;;;", "email": ";;;;;", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/hutchinson21a.html" }, { "title": "Light RUMs", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8503", "id": "8503", "proceeding": "http://proceedings.mlr.press/v139/chierichetti21a.html", "slides": "", "author_site": "Flavio Chierichetti, Ravi Kumar, Andrew Tomkins", "author": "Flavio Chierichetti; Ravi Kumar; Andrew Tomkins", "abstract": "A Random Utility Model (RUM) is a distribution on permutations over a universe of items. For each subset of the universe, a RUM induces a natural distribution of the winner in the subset: choose a permutation according to the RUM distribution and pick the maximum item in the subset according to the chosen permutation. RUMs are widely used in the theory of discrete choice. In this paper we consider the question of the (lossy) compressibility of RUMs on a universe of size $n$, i.e., the minimum number of bits required to approximate the winning probabilities of each slate. Our main result is that RUMs can be approximated using $\\tilde{O}(n^2)$ bits, an exponential improvement over the standard representation; furthermore, we show that this bound is optimal. En route, we sharpen the classical existential result of McFadden and Train (2000) by showing that the minimum size of a mixture of multinomial logits required to can approximate a general RUM is $\\tilde{\\Theta}(n)$.", "bibtex": "@InProceedings{pmlr-v139-chierichetti21a,\n title = \t {Light RUMs},\n author = {Chierichetti, Flavio and Kumar, Ravi and Tomkins, Andrew},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1888--1897},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chierichetti21a/chierichetti21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/chierichetti21a.html},\n abstract = \t {A Random Utility Model (RUM) is a distribution on permutations over a universe of items. For each subset of the universe, a RUM induces a natural distribution of the winner in the subset: choose a permutation according to the RUM distribution and pick the maximum item in the subset according to the chosen permutation. RUMs are widely used in the theory of discrete choice. 
In this paper we consider the question of the (lossy) compressibility of RUMs on a universe of size $n$, i.e., the minimum number of bits required to approximate the winning probabilities of each slate. Our main result is that RUMs can be approximated using $\\tilde{O}(n^2)$ bits, an exponential improvement over the standard representation; furthermore, we show that this bound is optimal. En route, we sharpen the classical existential result of McFadden and Train (2000) by showing that the minimum size of a mixture of multinomial logits required to approximate a general RUM is $\\tilde{\\Theta}(n)$.", "bibtex": "@InProceedings{pmlr-v139-chierichetti21a,\n title = \t {Light RUMs},\n author = {Chierichetti, Flavio and Kumar, Ravi and Tomkins, Andrew},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1888--1897},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chierichetti21a/chierichetti21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/chierichetti21a.html},\n abstract = \t {A Random Utility Model (RUM) is a distribution on permutations over a universe of items. For each subset of the universe, a RUM induces a natural distribution of the winner in the subset: choose a permutation according to the RUM distribution and pick the maximum item in the subset according to the chosen permutation. RUMs are widely used in the theory of discrete choice. In this paper we consider the question of the (lossy) compressibility of RUMs on a universe of size $n$, i.e., the minimum number of bits required to approximate the winning probabilities of each slate. Our main result is that RUMs can be approximated using $\\tilde{O}(n^2)$ bits, an exponential improvement over the standard representation; furthermore, we show that this bound is optimal. En route, we sharpen the classical existential result of McFadden and Train (2000) by showing that the minimum size of a mixture of multinomial logits required to approximate a general RUM is $\\tilde{\\Theta}(n)$.}\n}", "pdf": "http://proceedings.mlr.press/v139/chierichetti21a/chierichetti21a.pdf", "supp": "", "pdf_size": 698999, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1454731805742142763&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Dipartimento di Informatica, Sapienza University of Rome, Italy; Google, Mountain View, CA, USA; Google, Mountain View, CA, USA", "aff_domain": "di.uniroma1.it;gmail.com;gmail.com", "email": "di.uniroma1.it;gmail.com;gmail.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/chierichetti21a.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "Sapienza University of Rome;Google", "aff_unique_dep": "Department of Informatics;Google", "aff_unique_url": "https://www.sapienza.uniroma.it;https://www.google.com", "aff_unique_abbr": "Sapienza;Google", "aff_campus_unique_index": "0;1;1", "aff_campus_unique": "Rome;Mountain View", "aff_country_unique_index": "0;1;1", "aff_country_unique": "Italy;United States" }, { "title": "Linear Transformers Are Secretly Fast Weight Programmers", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10587", "id": "10587", "proceeding": "http://proceedings.mlr.press/v139/schlag21a.html", "slides": "", "author_site": "Imanol Schlag, Kazuki Irie, J\u00fcrgen Schmidhuber", "author": "Imanol Schlag; Kazuki Irie; J\u00fcrgen Schmidhuber", "abstract": "We show the formal equivalence of linearised self-attention mechanisms and fast weight controllers from the early \u201990s, where a slow neural net learns by gradient descent to program the fast weights of another net through sequences of elementary programming instructions which are additive outer products of self-invented activation patterns (today called keys and values). Such Fast Weight Programmers (FWPs) learn to manipulate the contents of a finite memory and dynamically interact with it. We infer a memory capacity limitation of recent linearised softmax attention variants, and replace the purely additive outer products by a delta rule-like programming instruction, such that the FWP can more easily learn to correct the current mapping from keys to values. The FWP also learns to compute dynamically changing learning rates. We also propose a new kernel function to linearise attention which balances simplicity and effectiveness.
We conduct experiments on synthetic retrieval problems as well as standard machine translation and language modelling tasks which demonstrate the benefits of our methods.", "bibtex": "@InProceedings{pmlr-v139-schlag21a,\n title = \t {Linear Transformers Are Secretly Fast Weight Programmers},\n author = {Schlag, Imanol and Irie, Kazuki and Schmidhuber, J{\\\"u}rgen},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9355--9366},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/schlag21a/schlag21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/schlag21a.html},\n abstract = \t {We show the formal equivalence of linearised self-attention mechanisms and fast weight controllers from the early \u201990s, where a slow neural net learns by gradient descent to program the fast weights of another net through sequences of elementary programming instructions which are additive outer products of self-invented activation patterns (today called keys and values). Such Fast Weight Programmers (FWPs) learn to manipulate the contents of a finite memory and dynamically interact with it. We infer a memory capacity limitation of recent linearised softmax attention variants, and replace the purely additive outer products by a delta rule-like programming instruction, such that the FWP can more easily learn to correct the current mapping from keys to values. The FWP also learns to compute dynamically changing learning rates. We also propose a new kernel function to linearise attention which balances simplicity and effectiveness. We conduct experiments on synthetic retrieval problems as well as standard machine translation and language modelling tasks which demonstrate the benefits of our methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/schlag21a/schlag21a.pdf", "supp": "", "pdf_size": 1326478, "gs_citation": 265, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7929763198773172485&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "The Swiss AI Lab IDSIA, USI & SUPSI; The Swiss AI Lab IDSIA, USI & SUPSI; The Swiss AI Lab IDSIA, USI & SUPSI", "aff_domain": "idsia.ch;idsia.ch;idsia.ch", "email": "idsia.ch;idsia.ch;idsia.ch", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/schlag21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Swiss AI Lab IDSIA", "aff_unique_dep": "AI Lab", "aff_unique_url": "https://www.idsia.ch/", "aff_unique_abbr": "IDSIA", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Switzerland" }, { "title": "Link Prediction with Persistent Homology: An Interactive View", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9487", "id": "9487", "proceeding": "http://proceedings.mlr.press/v139/yan21b.html", "slides": "/media/icml-2021/Slides/9487.pdf", "author_site": "Zuoyu Yan, Tengfei Ma, Liangcai Gao, Zhi Tang, Chao Chen", "author": "Zuoyu Yan; Tengfei Ma; Liangcai Gao; Zhi Tang; Chao Chen", "abstract": "Link prediction is an important learning task for graph-structured data. In this paper, we propose a novel topological approach to characterize interactions between two nodes. 
Our topological feature, based on the extended persistent homology, encodes rich structural information regarding the multi-hop paths connecting nodes. Based on this feature, we propose a graph neural network method that outperforms state-of-the-art methods on different benchmarks. As another contribution, we propose a novel algorithm to more efficiently compute the extended persistence diagrams for graphs. This algorithm can be generally applied to accelerate many other topological methods for graph learning tasks.", "bibtex": "@InProceedings{pmlr-v139-yan21b,\n title = \t {Link Prediction with Persistent Homology: An Interactive View},\n author = {Yan, Zuoyu and Ma, Tengfei and Gao, Liangcai and Tang, Zhi and Chen, Chao},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11659--11669},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yan21b/yan21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/yan21b.html},\n abstract = \t {Link prediction is an important learning task for graph-structured data. In this paper, we propose a novel topological approach to characterize interactions between two nodes. Our topological feature, based on the extended persistent homology, encodes rich structural information regarding the multi-hop paths connecting nodes. Based on this feature, we propose a graph neural network method that outperforms state-of-the-art methods on different benchmarks. As another contribution, we propose a novel algorithm to more efficiently compute the extended persistence diagrams for graphs. This algorithm can be generally applied to accelerate many other topological methods for graph learning tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/yan21b/yan21b.pdf", "supp": "", "pdf_size": 662659, "gs_citation": 71, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6988958697269886780&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Wangxuan Institute of Computer Technology, Peking University, Beijing, China+1; T. J. Watson Research Center, IBM, New York, USA+2; Wangxuan Institute of Computer Technology, Peking University, Beijing, China+1; Wangxuan Institute of Computer Technology, Peking University, Beijing, China+1; Department of Biomedical Informatics, Stony Brook University, New York, USA+3", "aff_domain": "pku.edu.cn;ibm.com;pku.edu.cn;pku.edu.cn;stonybrook.edu", "email": "pku.edu.cn;ibm.com;pku.edu.cn;pku.edu.cn;stonybrook.edu", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/yan21b.html", "aff_unique_index": "0;2;0;0;3", "aff_unique_norm": "Peking University;;IBM;Stony Brook University", "aff_unique_dep": "Wangxuan Institute of Computer Technology;;T. J.
Watson Research Center;Department of Biomedical Informatics", "aff_unique_url": "http://www.pku.edu.cn;;https://www.ibm.com;https://www.stonybrook.edu", "aff_unique_abbr": "PKU;;IBM;SBU", "aff_campus_unique_index": "0;2;0;0;3", "aff_campus_unique": "Beijing;;New York;Stony Brook", "aff_country_unique_index": "0;2;0;0;2", "aff_country_unique": "China;;United States" }, { "title": "Lipschitz normalization for self-attention layers with application to graph neural networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8537", "id": "8537", "proceeding": "http://proceedings.mlr.press/v139/dasoulas21a.html", "slides": "/media/icml-2021/Slides/8537.pdf", "author_site": "George Dasoulas, Kevin Scaman, Aladin Virmaux", "author": "George Dasoulas; Kevin Scaman; Aladin Virmaux", "abstract": "Attention based neural networks are state of the art in a large range of applications. However, their performance tends to degrade when the number of layers increases. In this work, we show that enforcing Lipschitz continuity by normalizing the attention scores can significantly improve the performance of deep attention models. First, we show that, for deep graph attention networks (GAT), gradient explosion appears during training, leading to poor performance of gradient-based training algorithms. To address this issue, we derive a theoretical analysis of the Lipschitz continuity of attention modules and introduce LipschitzNorm, a simple and parameter-free normalization for self-attention mechanisms that enforces the model to be Lipschitz continuous. We then apply LipschitzNorm to GAT and Graph Transformers and show that their performance is substantially improved in the deep setting (10 to 30 layers). More specifically, we show that a deep GAT model with LipschitzNorm achieves state of the art results for node label prediction tasks that exhibit long-range dependencies, while showing consistent improvements over their unnormalized counterparts in benchmark node classification tasks.", "bibtex": "@InProceedings{pmlr-v139-dasoulas21a,\n title = \t {Lipschitz normalization for self-attention layers with application to graph neural networks},\n author = {Dasoulas, George and Scaman, Kevin and Virmaux, Aladin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2456--2466},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/dasoulas21a/dasoulas21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/dasoulas21a.html},\n abstract = \t {Attention based neural networks are state of the art in a large range of applications. However, their performance tends to degrade when the number of layers increases. In this work, we show that enforcing Lipschitz continuity by normalizing the attention scores can significantly improve the performance of deep attention models. First, we show that, for deep graph attention networks (GAT), gradient explosion appears during training, leading to poor performance of gradient-based training algorithms. To address this issue, we derive a theoretical analysis of the Lipschitz continuity of attention modules and introduce LipschitzNorm, a simple and parameter-free normalization for self-attention mechanisms that enforces the model to be Lipschitz continuous. 
We then apply LipschitzNorm to GAT and Graph Transformers and show that their performance is substantially improved in the deep setting (10 to 30 layers). More specifically, we show that a deep GAT model with LipschitzNorm achieves state of the art results for node label prediction tasks that exhibit long-range dependencies, while showing consistent improvements over their unnormalized counterparts in benchmark node classification tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/dasoulas21a/dasoulas21a.pdf", "supp": "", "pdf_size": 2395164, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11996902541195607773&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Noah\u2019s Ark Lab, Huawei Technologies France+DaSciM, LIX, \u00c9cole Polytechnique, France; Noah\u2019s Ark Lab, Huawei Technologies France; Noah\u2019s Ark Lab, Huawei Technologies France", "aff_domain": "gmail.com; ; ", "email": "gmail.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/dasoulas21a.html", "aff_unique_index": "0+1;0;0", "aff_unique_norm": "Huawei;Ecole Polytechnique", "aff_unique_dep": "Noah\u2019s Ark Lab;DaSciM, LIX", "aff_unique_url": "https://www.huawei.com/fr;https://www.ec-polytechnique.fr", "aff_unique_abbr": "Huawei;X", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "France" }, { "title": "Local Algorithms for Finding Densely Connected Clusters", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10555", "id": "10555", "proceeding": "http://proceedings.mlr.press/v139/macgregor21a.html", "slides": "/media/icml-2021/Slides/10555_NW0O0Ro.pdf", "author_site": "Peter Macgregor, He Sun", "author": "Peter Macgregor; He Sun", "abstract": "Local graph clustering is an important algorithmic technique for analysing massive graphs, and has been widely applied in many research fields of data science. While the objective of most (local) graph clustering algorithms is to find a vertex set of low conductance, there has been a sequence of recent studies that highlight the importance of the inter-connection between clusters when analysing real-world datasets. Following this line of research, in this work we study local algorithms for finding a pair of vertex sets defined with respect to their inter-connection and their relationship with the rest of the graph. The key to our analysis is a new reduction technique that relates the structure of multiple sets to a single vertex set in the reduced graph. 
Among many potential applications, we show that our algorithms successfully recover densely connected clusters in the Interstate Disputes Dataset and the US Migration Dataset.", "bibtex": "@InProceedings{pmlr-v139-macgregor21a,\n title = \t {Local Algorithms for Finding Densely Connected Clusters},\n author = {Macgregor, Peter and Sun, He},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7268--7278},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/macgregor21a/macgregor21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/macgregor21a.html},\n abstract = \t {Local graph clustering is an important algorithmic technique for analysing massive graphs, and has been widely applied in many research fields of data science. While the objective of most (local) graph clustering algorithms is to find a vertex set of low conductance, there has been a sequence of recent studies that highlight the importance of the inter-connection between clusters when analysing real-world datasets. Following this line of research, in this work we study local algorithms for finding a pair of vertex sets defined with respect to their inter-connection and their relationship with the rest of the graph. The key to our analysis is a new reduction technique that relates the structure of multiple sets to a single vertex set in the reduced graph. Among many potential applications, we show that our algorithms successfully recover densely connected clusters in the Interstate Disputes Dataset and the US Migration Dataset.}\n}", "pdf": "http://proceedings.mlr.press/v139/macgregor21a/macgregor21a.pdf", "supp": "", "pdf_size": 1548679, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2599205940153817748&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "School of Informatics, University of Edinburgh, Edinburgh, United Kingdom; School of Informatics, University of Edinburgh, Edinburgh, United Kingdom", "aff_domain": "ed.ac.uk;ed.ac.uk", "email": "ed.ac.uk;ed.ac.uk", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/macgregor21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Edinburgh", "aff_unique_dep": "School of Informatics", "aff_unique_url": "https://www.ed.ac.uk", "aff_unique_abbr": "Edinburgh", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Edinburgh", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "title": "Local Correlation Clustering with Asymmetric Classification Errors", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9161", "id": "9161", "proceeding": "http://proceedings.mlr.press/v139/jafarov21a.html", "slides": "/media/icml-2021/Slides/9161.pdf", "author_site": "Jafar Jafarov, Sanchit Kalhan, Konstantin Makarychev, Yury Makarychev", "author": "Jafar Jafarov; Sanchit Kalhan; Konstantin Makarychev; Yury Makarychev", "abstract": "In the Correlation Clustering problem, we are given a complete weighted graph $G$ with its edges labeled as \u201csimilar\" and \u201cdissimilar\" by a noisy binary classifier. 
For a clustering $\\mathcal{C}$ of graph $G$, a similar edge is in disagreement with $\\mathcal{C}$, if its endpoints belong to distinct clusters; and a dissimilar edge is in disagreement with $\\mathcal{C}$ if its endpoints belong to the same cluster. The disagreements vector, $\\disagree$, is a vector indexed by the vertices of $G$ such that the $v$-th coordinate $\\disagree_v$ equals the weight of all disagreeing edges incident on $v$. The goal is to produce a clustering that minimizes the $\\ell_p$ norm of the disagreements vector for $p\\geq 1$. We study the $\\ell_p$ objective in Correlation Clustering under the following assumption: Every similar edge has weight in $[\\alpha\\mathbf{w},\\mathbf{w}]$ and every dissimilar edge has weight at least $\\alpha\\mathbf{w}$ (where $\\alpha \\leq 1$ and $\\mathbf{w}>0$ is a scaling parameter). We give an $O\\left((\\nicefrac{1}{\\alpha})^{\\nicefrac{1}{2}-\\nicefrac{1}{2p}}\\cdot \\log\\nicefrac{1}{\\alpha}\\right)$ approximation algorithm for this problem. Furthermore, we show an almost matching convex programming integrality gap.", "bibtex": "@InProceedings{pmlr-v139-jafarov21a,\n title = \t {Local Correlation Clustering with Asymmetric Classification Errors},\n author = {Jafarov, Jafar and Kalhan, Sanchit and Makarychev, Konstantin and Makarychev, Yury},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4677--4686},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jafarov21a/jafarov21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jafarov21a.html},\n abstract = \t {In the Correlation Clustering problem, we are given a complete weighted graph $G$ with its edges labeled as \u201csimilar\" and \u201cdissimilar\" by a noisy binary classifier. For a clustering $\\mathcal{C}$ of graph $G$, a similar edge is in disagreement with $\\mathcal{C}$, if its endpoints belong to distinct clusters; and a dissimilar edge is in disagreement with $\\mathcal{C}$ if its endpoints belong to the same cluster. The disagreements vector, $\\disagree$, is a vector indexed by the vertices of $G$ such that the $v$-th coordinate $\\disagree_v$ equals the weight of all disagreeing edges incident on $v$. The goal is to produce a clustering that minimizes the $\\ell_p$ norm of the disagreements vector for $p\\geq 1$. We study the $\\ell_p$ objective in Correlation Clustering under the following assumption: Every similar edge has weight in $[\\alpha\\mathbf{w},\\mathbf{w}]$ and every dissimilar edge has weight at least $\\alpha\\mathbf{w}$ (where $\\alpha \\leq 1$ and $\\mathbf{w}>0$ is a scaling parameter). We give an $O\\left((\\nicefrac{1}{\\alpha})^{\\nicefrac{1}{2}-\\nicefrac{1}{2p}}\\cdot \\log\\nicefrac{1}{\\alpha}\\right)$ approximation algorithm for this problem. 
Furthermore, we show an almost matching convex programming integrality gap.}\n}", "pdf": "http://proceedings.mlr.press/v139/jafarov21a/jafarov21a.pdf", "supp": "", "pdf_size": 342621, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3301681135764938791&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "University of Chicago; Northwestern University; Northwestern University; TTIC", "aff_domain": "uchicago.edu;u.northwestern.edu; ; ", "email": "uchicago.edu;u.northwestern.edu; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/jafarov21a.html", "aff_unique_index": "0;1;1;2", "aff_unique_norm": "University of Chicago;Northwestern University;Toyota Technological Institute at Chicago", "aff_unique_dep": ";;", "aff_unique_url": "https://www.uchicago.edu;https://www.northwestern.edu;https://www.ttic.edu", "aff_unique_abbr": "UChicago;NU;TTIC", "aff_campus_unique_index": "1", "aff_campus_unique": ";Chicago", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Locally Adaptive Label Smoothing Improves Predictive Churn", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8779", "id": "8779", "proceeding": "http://proceedings.mlr.press/v139/bahri21a.html", "slides": "", "author_site": "Dara Bahri, Heinrich Jiang", "author": "Dara Bahri; Heinrich Jiang", "abstract": "Training modern neural networks is an inherently noisy process that can lead to high \\emph{prediction churn}\u2013 disagreements between re-trainings of the same model due to factors such as randomization in the parameter initialization and mini-batches\u2013 even when the trained models all attain similar accuracies. Such prediction churn can be very undesirable in practice. In this paper, we present several baselines for reducing churn and show that training on soft labels obtained by adaptively smoothing each example\u2019s label based on the example\u2019s neighboring labels often outperforms the baselines on churn while improving accuracy on a variety of benchmark classification tasks and model architectures.", "bibtex": "@InProceedings{pmlr-v139-bahri21a,\n title = \t {Locally Adaptive Label Smoothing Improves Predictive Churn},\n author = {Bahri, Dara and Jiang, Heinrich},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {532--542},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bahri21a/bahri21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bahri21a.html},\n abstract = \t {Training modern neural networks is an inherently noisy process that can lead to high \\emph{prediction churn}\u2013 disagreements between re-trainings of the same model due to factors such as randomization in the parameter initialization and mini-batches\u2013 even when the trained models all attain similar accuracies. Such prediction churn can be very undesirable in practice. 
In this paper, we present several baselines for reducing churn and show that training on soft labels obtained by adaptively smoothing each example\u2019s label based on the example\u2019s neighboring labels often outperforms the baselines on churn while improving accuracy on a variety of benchmark classification tasks and model architectures.}\n}", "pdf": "http://proceedings.mlr.press/v139/bahri21a/bahri21a.pdf", "supp": "", "pdf_size": 1247156, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13621302320717983888&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "Google Research, Mountain View, USA; Google Research, Mountain View, USA", "aff_domain": "google.com; ", "email": "google.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/bahri21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Locally Persistent Exploration in Continuous Control Tasks with Sparse Rewards", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9613", "id": "9613", "proceeding": "http://proceedings.mlr.press/v139/amin21a.html", "slides": "", "author_site": "Susan Amin, Maziar Gomrokchi, Hossein Aboutalebi, Harsh Satija, Doina Precup", "author": "Susan Amin; Maziar Gomrokchi; Hossein Aboutalebi; Harsh Satija; Doina Precup", "abstract": "A major challenge in reinforcement learning is the design of exploration strategies, especially for environments with sparse reward structures and continuous state and action spaces. Intuitively, if the reinforcement signal is very scarce, the agent should rely on some form of short-term memory in order to cover its environment efficiently. We propose a new exploration method, based on two intuitions: (1) the choice of the next exploratory action should depend not only on the (Markovian) state of the environment, but also on the agent\u2019s trajectory so far, and (2) the agent should utilize a measure of spread in the state space to avoid getting stuck in a small region. Our method leverages concepts often used in statistical physics to provide explanations for the behavior of simplified (polymer) chains in order to generate persistent (locally self-avoiding) trajectories in state space. We discuss the theoretical properties of locally self-avoiding walks and their ability to provide a kind of short-term memory through a decaying temporal correlation within the trajectory. 
We provide empirical evaluations of our approach in a simulated 2D navigation task, as well as higher-dimensional MuJoCo continuous control locomotion tasks with sparse rewards.", "bibtex": "@InProceedings{pmlr-v139-amin21a,\n title = \t {Locally Persistent Exploration in Continuous Control Tasks with Sparse Rewards},\n author = {Amin, Susan and Gomrokchi, Maziar and Aboutalebi, Hossein and Satija, Harsh and Precup, Doina},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {275--285},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/amin21a/amin21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/amin21a.html},\n abstract = \t {A major challenge in reinforcement learning is the design of exploration strategies, especially for environments with sparse reward structures and continuous state and action spaces. Intuitively, if the reinforcement signal is very scarce, the agent should rely on some form of short-term memory in order to cover its environment efficiently. We propose a new exploration method, based on two intuitions: (1) the choice of the next exploratory action should depend not only on the (Markovian) state of the environment, but also on the agent\u2019s trajectory so far, and (2) the agent should utilize a measure of spread in the state space to avoid getting stuck in a small region. Our method leverages concepts often used in statistical physics to provide explanations for the behavior of simplified (polymer) chains in order to generate persistent (locally self-avoiding) trajectories in state space. We discuss the theoretical properties of locally self-avoiding walks and their ability to provide a kind of short-term memory through a decaying temporal correlation within the trajectory. 
We provide empirical evaluations of our approach in a simulated 2D navigation task, as well as higher-dimensional MuJoCo continuous control locomotion tasks with sparse rewards.}\n}", "pdf": "http://proceedings.mlr.press/v139/amin21a/amin21a.pdf", "supp": "", "pdf_size": 7828042, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15739830429970028692&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science, McGill University, Montr\u00e9al, Qu\u00e9bec, Canada + Mila - Qu\u00e9bec Artificial Intelligence Institute, Montr\u00e9al, Qu\u00e9bec, Canada; Department of Computer Science, McGill University, Montr\u00e9al, Qu\u00e9bec, Canada + Mila - Qu\u00e9bec Artificial Intelligence Institute, Montr\u00e9al, Qu\u00e9bec, Canada; Department of Computer Science, University of Waterloo, Waterloo, Ontario, Canada + Waterloo Artificial Intelligence Institute, University of Waterloo, Waterloo, Ontario, Canada; Department of Computer Science, McGill University, Montr\u00e9al, Qu\u00e9bec, Canada + Mila - Qu\u00e9bec Artificial Intelligence Institute, Montr\u00e9al, Qu\u00e9bec, Canada; Department of Computer Science, McGill University, Montr\u00e9al, Qu\u00e9bec, Canada + Mila - Qu\u00e9bec Artificial Intelligence Institute, Montr\u00e9al, Qu\u00e9bec, Canada", "aff_domain": "mail.mcgill.ca; ; ; ; ", "email": "mail.mcgill.ca; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/amin21a.html", "aff_unique_index": "0+1;0+1;2+2;0+1;0+1", "aff_unique_norm": "McGill University;Mila - Quebec Artificial Intelligence Institute;University of Waterloo", "aff_unique_dep": "Department of Computer Science;Artificial Intelligence;Department of Computer Science", "aff_unique_url": "https://www.mcgill.ca;https://mila.quebec;https://uwaterloo.ca", "aff_unique_abbr": "McGill;Mila;UW", "aff_campus_unique_index": "0+1;0+1;2+2;0+1;0+1", "aff_campus_unique": "Montr\u00e9al;Montreal;Waterloo", "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0", "aff_country_unique": "Canada" }, { "title": "Locally Private k-Means in One Round", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8515", "id": "8515", "proceeding": "http://proceedings.mlr.press/v139/chang21a.html", "slides": "/media/icml-2021/Slides/8515.pdf", "author_site": "Alisa Chang, Badih Ghazi, Ravi Kumar, Pasin Manurangsi", "author": "Alisa Chang; Badih Ghazi; Ravi Kumar; Pasin Manurangsi", "abstract": "We provide an approximation algorithm for k-means clustering in the \\emph{one-round} (aka \\emph{non-interactive}) local model of differential privacy (DP). Our algorithm achieves an approximation ratio arbitrarily close to the best \\emph{non private} approximation algorithm, improving upon previously known algorithms that only guarantee large (constant) approximation ratios. Furthermore, ours is the first constant-factor approximation algorithm for k-means that requires only \\emph{one} round of communication in the local DP model, positively resolving an open question of Stemmer (SODA 2020). 
Our algorithmic framework is quite flexible; we demonstrate this by showing that it also yields a similar near-optimal approximation algorithm in the (one-round) shuffle DP model.", "bibtex": "@InProceedings{pmlr-v139-chang21a,\n title = \t {Locally Private k-Means in One Round},\n author = {Chang, Alisa and Ghazi, Badih and Kumar, Ravi and Manurangsi, Pasin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1441--1451},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chang21a/chang21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/chang21a.html},\n abstract = \t {We provide an approximation algorithm for k-means clustering in the \\emph{one-round} (aka \\emph{non-interactive}) local model of differential privacy (DP). Our algorithm achieves an approximation ratio arbitrarily close to the best \\emph{non private} approximation algorithm, improving upon previously known algorithms that only guarantee large (constant) approximation ratios. Furthermore, ours is the first constant-factor approximation algorithm for k-means that requires only \\emph{one} round of communication in the local DP model, positively resolving an open question of Stemmer (SODA 2020). Our algorithmic framework is quite flexible; we demonstrate this by showing that it also yields a similar near-optimal approximation algorithm in the (one-round) shuffle DP model.}\n}", "pdf": "http://proceedings.mlr.press/v139/chang21a/chang21a.pdf", "supp": "", "pdf_size": 689945, "gs_citation": 36, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11522309776303099270&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Google Research, Mountain View, CA; Google Research, Mountain View, CA; Google Research, Mountain View, CA; Google Research, Mountain View, CA", "aff_domain": "google.com;gmail.com;gmail.com;google.com", "email": "google.com;gmail.com;gmail.com;google.com", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/chang21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "LogME: Practical Assessment of Pre-trained Models for Transfer Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10231", "id": "10231", "proceeding": "http://proceedings.mlr.press/v139/you21b.html", "slides": "/media/icml-2021/Slides/10231.pdf", "author_site": "Kaichao You, Yong Liu, Jianmin Wang, Mingsheng Long", "author": "Kaichao You; Yong Liu; Jianmin Wang; Mingsheng Long", "abstract": "This paper studies task adaptive pre-trained model selection, an underexplored problem of assessing pre-trained models for the target task and select best ones from the model zoo \\emph{without fine-tuning}. A few pilot works addressed the problem in transferring supervised pre-trained models to classification tasks, but they cannot handle emerging unsupervised pre-trained models or regression tasks. 
In pursuit of a practical assessment method, we propose to estimate the maximum value of label evidence given features extracted by pre-trained models. Unlike the maximum likelihood, the maximum evidence is \\emph{immune to over-fitting}, while its expensive computation can be dramatically reduced by our carefully designed algorithm. The Logarithm of Maximum Evidence (LogME) can be used to assess pre-trained models for transfer learning: a pre-trained model with a high LogME value is likely to have good transfer performance. LogME is \\emph{fast, accurate, and general}, characterizing itself as the first practical method for assessing pre-trained models. Compared with brute-force fine-tuning, LogME brings at most $3000\\times$ speedup in wall-clock time and requires only $1%$ memory footprint. It outperforms prior methods by a large margin in their setting and is applicable to new settings. It is general enough for diverse pre-trained models (supervised pre-trained and unsupervised pre-trained), downstream tasks (classification and regression), and modalities (vision and language). Code is available at this repository: \\href{https://github.com/thuml/LogME}{https://github.com/thuml/LogME}.", "bibtex": "@InProceedings{pmlr-v139-you21b,\n title = \t {LogME: Practical Assessment of Pre-trained Models for Transfer Learning},\n author = {You, Kaichao and Liu, Yong and Wang, Jianmin and Long, Mingsheng},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12133--12143},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/you21b/you21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/you21b.html},\n abstract = \t {This paper studies task adaptive pre-trained model selection, an underexplored problem of assessing pre-trained models for the target task and select best ones from the model zoo \\emph{without fine-tuning}. A few pilot works addressed the problem in transferring supervised pre-trained models to classification tasks, but they cannot handle emerging unsupervised pre-trained models or regression tasks. In pursuit of a practical assessment method, we propose to estimate the maximum value of label evidence given features extracted by pre-trained models. Unlike the maximum likelihood, the maximum evidence is \\emph{immune to over-fitting}, while its expensive computation can be dramatically reduced by our carefully designed algorithm. The Logarithm of Maximum Evidence (LogME) can be used to assess pre-trained models for transfer learning: a pre-trained model with a high LogME value is likely to have good transfer performance. LogME is \\emph{fast, accurate, and general}, characterizing itself as the first practical method for assessing pre-trained models. Compared with brute-force fine-tuning, LogME brings at most $3000\\times$ speedup in wall-clock time and requires only $1%$ memory footprint. It outperforms prior methods by a large margin in their setting and is applicable to new settings. It is general enough for diverse pre-trained models (supervised pre-trained and unsupervised pre-trained), downstream tasks (classification and regression), and modalities (vision and language). 
Code is available at this repository: \\href{https://github.com/thuml/LogME}{https://github.com/thuml/LogME}.}\n}", "pdf": "http://proceedings.mlr.press/v139/you21b/you21b.pdf", "supp": "", "pdf_size": 1043065, "gs_citation": 231, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7398435047749789865&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "School of Software, BNRist, Tsinghua University, Beijing 100084, China; School of Software, BNRist, Tsinghua University, Beijing 100084, China; School of Software, BNRist, Tsinghua University, Beijing 100084, China; School of Software, BNRist, Tsinghua University, Beijing 100084, China", "aff_domain": "gmail.com; ; ;tsinghua.edu.cn", "email": "gmail.com; ; ;tsinghua.edu.cn", "github": "https://github.com/thuml/LogME", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/you21b.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Tsinghua University", "aff_unique_dep": "School of Software", "aff_unique_url": "https://www.tsinghua.edu.cn", "aff_unique_abbr": "THU", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Beijing", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "China" }, { "title": "Logarithmic Regret for Reinforcement Learning with Linear Function Approximation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10521", "id": "10521", "proceeding": "http://proceedings.mlr.press/v139/he21c.html", "slides": "", "author_site": "Jiafan He, Dongruo Zhou, Quanquan Gu", "author": "Jiafan He; Dongruo Zhou; Quanquan Gu", "abstract": "Reinforcement learning (RL) with linear function approximation has received increasing attention recently. However, existing work has focused on obtaining $\\sqrt{T}$-type regret bound, where $T$ is the number of interactions with the MDP. In this paper, we show that logarithmic regret is attainable under two recently proposed linear MDP assumptions provided that there exists a positive sub-optimality gap for the optimal action-value function. More specifically, under the linear MDP assumption (Jin et al., 2020), the LSVI-UCB algorithm can achieve $\\tilde{O}(d^{3}H^5/\\text{gap}_{\\text{min}}\\cdot \\log(T))$regret; and under the linear mixture MDP assumption (Ayoub et al., 2020), the UCRL-VTR algorithm can achieve $\\tilde{O}(d^{2}H^5/\\text{gap}_{\\text{min}}\\cdot \\log^3(T))$ regret, where $d$ is the dimension of feature mapping, $H$ is the length of episode, $\\text{gap}_{\\text{min}}$ is the minimal sub-optimality gap, and $\\tilde O$ hides all logarithmic terms except $\\log(T)$. To the best of our knowledge, these are the first logarithmic regret bounds for RL with linear function approximation. 
We also establish gap-dependent lower bounds for the two linear MDP models.", "bibtex": "@InProceedings{pmlr-v139-he21c,\n title = \t {Logarithmic Regret for Reinforcement Learning with Linear Function Approximation},\n author = {He, Jiafan and Zhou, Dongruo and Gu, Quanquan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4171--4180},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/he21c/he21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/he21c.html},\n abstract = \t {Reinforcement learning (RL) with linear function approximation has received increasing attention recently. However, existing work has focused on obtaining $\\sqrt{T}$-type regret bound, where $T$ is the number of interactions with the MDP. In this paper, we show that logarithmic regret is attainable under two recently proposed linear MDP assumptions provided that there exists a positive sub-optimality gap for the optimal action-value function. More specifically, under the linear MDP assumption (Jin et al., 2020), the LSVI-UCB algorithm can achieve $\\tilde{O}(d^{3}H^5/\\text{gap}_{\\text{min}}\\cdot \\log(T))$regret; and under the linear mixture MDP assumption (Ayoub et al., 2020), the UCRL-VTR algorithm can achieve $\\tilde{O}(d^{2}H^5/\\text{gap}_{\\text{min}}\\cdot \\log^3(T))$ regret, where $d$ is the dimension of feature mapping, $H$ is the length of episode, $\\text{gap}_{\\text{min}}$ is the minimal sub-optimality gap, and $\\tilde O$ hides all logarithmic terms except $\\log(T)$. To the best of our knowledge, these are the first logarithmic regret bounds for RL with linear function approximation. We also establish gap-dependent lower bounds for the two linear MDP models.}\n}", "pdf": "http://proceedings.mlr.press/v139/he21c/he21c.pdf", "supp": "", "pdf_size": 348941, "gs_citation": 112, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11613798699529990981&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, University of California, Los Angeles, CA 90095, USA; Department of Computer Science, University of California, Los Angeles, CA 90095, USA; Department of Computer Science, University of California, Los Angeles, CA 90095, USA", "aff_domain": "cs.ucla.edu; ; ", "email": "cs.ucla.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/he21c.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, Los Angeles", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ucla.edu", "aff_unique_abbr": "UCLA", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Los Angeles", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Loss Surface Simplexes for Mode Connecting Volumes and Fast Ensembling", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10573", "id": "10573", "proceeding": "http://proceedings.mlr.press/v139/benton21a.html", "slides": "", "author_site": "Gregory Benton, Wesley Maddox, Sanae Lotfi, Andrew Wilson", "author": "Gregory Benton; Wesley Maddox; Sanae Lotfi; Andrew Gordon Gordon Wilson", "abstract": "With a better understanding of the loss surfaces for multilayer networks, we can build more robust and accurate training procedures. 
Recently it was discovered that independently trained SGD solutions can be connected along one-dimensional paths of near-constant training loss. In this paper, we in fact demonstrate the existence of mode-connecting simplicial complexes that form multi-dimensional manifolds of low loss, connecting many independently trained models. Building on this discovery, we show how to efficiently construct simplicial complexes for fast ensembling, outperforming independently trained deep ensembles in accuracy, calibration, and robustness to dataset shift. Notably, our approach is easy to apply and only requires a few training epochs to discover a low-loss simplex.", "bibtex": "@InProceedings{pmlr-v139-benton21a,\n title = \t {Loss Surface Simplexes for Mode Connecting Volumes and Fast Ensembling},\n author = {Benton, Gregory and Maddox, Wesley and Lotfi, Sanae and Wilson, Andrew Gordon Gordon},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {769--779},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/benton21a/benton21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/benton21a.html},\n abstract = \t {With a better understanding of the loss surfaces for multilayer networks, we can build more robust and accurate training procedures. Recently it was discovered that independently trained SGD solutions can be connected along one-dimensional paths of near-constant training loss. In this paper, we in fact demonstrate the existence of mode-connecting simplicial complexes that form multi-dimensional manifolds of low loss, connecting many independently trained models. Building on this discovery, we show how to efficiently construct simplicial complexes for fast ensembling, outperforming independently trained deep ensembles in accuracy, calibration, and robustness to dataset shift. Notably, our approach is easy to apply and only requires a few training epochs to discover a low-loss simplex.}\n}", "pdf": "http://proceedings.mlr.press/v139/benton21a/benton21a.pdf", "supp": "", "pdf_size": 2400065, "gs_citation": 84, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11311661921259603537&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "https://github.com/g-benton/loss-surface-simplexes", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/benton21a.html" }, { "title": "Lossless Compression of Efficient Private Local Randomizers", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9967", "id": "9967", "proceeding": "http://proceedings.mlr.press/v139/feldman21a.html", "slides": "", "author_site": "Vitaly Feldman, Kunal Talwar", "author": "Vitaly Feldman; Kunal Talwar", "abstract": "Locally Differentially Private (LDP) Reports are commonly used for collection of statistics and machine learning in the federated setting. In many cases the best known LDP algorithms require sending prohibitively large messages from the client device to the server (such as when constructing histograms over a large domain or learning a high-dimensional model). Here we demonstrate a general approach that, under standard cryptographic assumptions, compresses every efficient LDP algorithm with negligible loss in privacy and utility guarantees. 
The practical implication of our result is that in typical applications every message can be compressed to the size of the server\u2019s pseudo-random generator seed. From this general approach we derive low-communication algorithms for the problems of frequency estimation and high-dimensional mean estimation. Our algorithms are simpler and more accurate than existing low-communication LDP algorithms for these well-studied problems.", "bibtex": "@InProceedings{pmlr-v139-feldman21a,\n title = \t {Lossless Compression of Efficient Private Local Randomizers},\n author = {Feldman, Vitaly and Talwar, Kunal},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3208--3219},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/feldman21a/feldman21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/feldman21a.html},\n abstract = \t {Locally Differentially Private (LDP) Reports are commonly used for collection of statistics and machine learning in the federated setting. In many cases the best known LDP algorithms require sending prohibitively large messages from the client device to the server (such as when constructing histograms over a large domain or learning a high-dimensional model). Here we demonstrate a general approach that, under standard cryptographic assumptions, compresses every efficient LDP algorithm with negligible loss in privacy and utility guarantees. The practical implication of our result is that in typical applications every message can be compressed to the size of the server\u2019s pseudo-random generator seed. From this general approach we derive low-communication algorithms for the problems of frequency estimation and high-dimensional mean estimation. 
Our algorithms are simpler and more accurate than existing low-communication LDP algorithms for these well-studied problems.}\n}", "pdf": "http://proceedings.mlr.press/v139/feldman21a/feldman21a.pdf", "supp": "", "pdf_size": 471353, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10786041617880818497&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Apple; Apple", "aff_domain": "gmail.com; ", "email": "gmail.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/feldman21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Apple", "aff_unique_dep": "Apple Inc.", "aff_unique_url": "https://www.apple.com", "aff_unique_abbr": "Apple", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Lottery Ticket Preserves Weight Correlation: Is It Desirable or Not?", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9423", "id": "9423", "proceeding": "http://proceedings.mlr.press/v139/liu21aa.html", "slides": "", "author_site": "Ning Liu, Geng Yuan, Zhengping Che, Xuan Shen, Xiaolong Ma, Qing Jin, Jian Ren, Jian Tang, Sijia Liu, Yanzhi Wang", "author": "Ning Liu; Geng Yuan; Zhengping Che; Xuan Shen; Xiaolong Ma; Qing Jin; Jian Ren; Jian Tang; Sijia Liu; Yanzhi Wang", "abstract": "In deep model compression, the recent finding \"Lottery Ticket Hypothesis\" (LTH) pointed out that there could exist a winning ticket (i.e., a properly pruned sub-network together with original weight initialization) that can achieve competitive performance than the original dense network. However, it is not easy to observe such winning property in many scenarios, where for example, a relatively large learning rate is used even if it benefits training the original dense model. In this work, we investigate the underlying condition and rationale behind the winning property, and find that the underlying reason is largely attributed to the correlation between initialized weights and final-trained weights when the learning rate is not sufficiently large. Thus, the existence of winning property is correlated with an insufficient DNN pretraining, and is unlikely to occur for a well-trained DNN. To overcome this limitation, we propose the \"pruning & fine-tuning\" method that consistently outperforms lottery ticket sparse training under the same pruning algorithm and the same total training epochs. 
Extensive experiments over multiple deep models (VGG, ResNet, MobileNet-v2) on different datasets have been conducted to justify our proposals.", "bibtex": "@InProceedings{pmlr-v139-liu21aa,\n title = \t {Lottery Ticket Preserves Weight Correlation: Is It Desirable or Not?},\n author = {Liu, Ning and Yuan, Geng and Che, Zhengping and Shen, Xuan and Ma, Xiaolong and Jin, Qing and Ren, Jian and Tang, Jian and Liu, Sijia and Wang, Yanzhi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7011--7020},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21aa/liu21aa.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21aa.html},\n abstract = \t {In deep model compression, the recent finding \"Lottery Ticket Hypothesis\" (LTH) pointed out that there could exist a winning ticket (i.e., a properly pruned sub-network together with original weight initialization) that can achieve competitive performance than the original dense network. However, it is not easy to observe such winning property in many scenarios, where for example, a relatively large learning rate is used even if it benefits training the original dense model. In this work, we investigate the underlying condition and rationale behind the winning property, and find that the underlying reason is largely attributed to the correlation between initialized weights and final-trained weights when the learning rate is not sufficiently large. Thus, the existence of winning property is correlated with an insufficient DNN pretraining, and is unlikely to occur for a well-trained DNN. To overcome this limitation, we propose the \"pruning & fine-tuning\" method that consistently outperforms lottery ticket sparse training under the same pruning algorithm and the same total training epochs. 
Extensive experiments over multiple deep models (VGG, ResNet, MobileNet-v2) on different datasets have been conducted to justify our proposals.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21aa/liu21aa.pdf", "supp": "", "pdf_size": 6536568, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14209764781200681354&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Midea Group, Beijing, China; Northeastern University, Boston, MA, USA; Didi Chuxing, Beijing, China; Northeastern University, Boston, MA, USA; Northeastern University, Boston, MA, USA; Northeastern University, Boston, MA, USA; Snap Inc., CA, USA; Midea Group, Beijing, China; Michigan State University, MI, USA; Northeastern University, Boston, MA, USA", "aff_domain": "; ; ; ; ; ; ; ; ; ", "email": "; ; ; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 10, "oa": "https://proceedings.mlr.press/v139/liu21aa.html", "aff_unique_index": "0;1;2;1;1;1;3;0;4;1", "aff_unique_norm": "Midea Group;Northeastern University;Didi Chuxing;Snap Inc.;Michigan State University", "aff_unique_dep": ";;;;", "aff_unique_url": "https://www.mideaglobal.com;https://www.northeastern.edu;https://www.didi.com;https://www.snapinc.com;https://www.msu.edu", "aff_unique_abbr": ";NEU;Didi;Snap;MSU", "aff_campus_unique_index": "1;1;1;1;2;1", "aff_campus_unique": ";Boston;East Lansing", "aff_country_unique_index": "0;1;0;1;1;1;1;0;1;1", "aff_country_unique": "China;United States" }, { "title": "Low-Precision Reinforcement Learning: Running Soft Actor-Critic in Half Precision", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10605", "id": "10605", "proceeding": "http://proceedings.mlr.press/v139/bjorck21a.html", "slides": "", "author_site": "Johan Bj\u00f6rck, Xiangyu Chen, Christopher De Sa, Carla Gomes, Kilian Weinberger", "author": "Johan Bj\u00f6rck; Xiangyu Chen; Christopher De Sa; Carla P Gomes; Kilian Weinberger", "abstract": "Low-precision training has become a popular approach to reduce compute requirements, memory footprint, and energy consumption in supervised learning. In contrast, this promising approach has not yet enjoyed similarly widespread adoption within the reinforcement learning (RL) community, partly because RL agents can be notoriously hard to train even in full precision. In this paper we consider continuous control with the state-of-the-art SAC agent and demonstrate that a na\u00efve adaptation of low-precision methods from supervised learning fails. We propose a set of six modifications, all straightforward to implement, that leaves the underlying agent and its hyperparameters unchanged but improves the numerical stability dramatically. 
The resulting modified SAC agent has lower memory and compute requirements while matching full-precision rewards, demonstrating that low-precision training can substantially accelerate state-of-the-art RL without parameter tuning.", "bibtex": "@InProceedings{pmlr-v139-bjorck21a,\n title = \t {Low-Precision Reinforcement Learning: Running Soft Actor-Critic in Half Precision},\n author = {Bj{\\\"o}rck, Johan and Chen, Xiangyu and De Sa, Christopher and Gomes, Carla P and Weinberger, Kilian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {980--991},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bjorck21a/bjorck21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bjorck21a.html},\n abstract = \t {Low-precision training has become a popular approach to reduce compute requirements, memory footprint, and energy consumption in supervised learning. In contrast, this promising approach has not yet enjoyed similarly widespread adoption within the reinforcement learning (RL) community, partly because RL agents can be notoriously hard to train even in full precision. In this paper we consider continuous control with the state-of-the-art SAC agent and demonstrate that a na\u00efve adaptation of low-precision methods from supervised learning fails. We propose a set of six modifications, all straightforward to implement, that leaves the underlying agent and its hyperparameters unchanged but improves the numerical stability dramatically. The resulting modified SAC agent has lower memory and compute requirements while matching full-precision rewards, demonstrating that low-precision training can substantially accelerate state-of-the-art RL without parameter tuning.}\n}", "pdf": "http://proceedings.mlr.press/v139/bjorck21a/bjorck21a.pdf", "supp": "", "pdf_size": 837386, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7045955362887097974&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, Cornell University, USA; Department of Computer Science, Cornell University, USA; Department of Computer Science, Cornell University, USA; Department of Computer Science, Cornell University, USA; Department of Computer Science, Cornell University, USA", "aff_domain": "cornell.edu; ; ; ; ", "email": "cornell.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/bjorck21a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Cornell University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.cornell.edu", "aff_unique_abbr": "Cornell", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Low-Rank Sinkhorn Factorization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8545", "id": "8545", "proceeding": "http://proceedings.mlr.press/v139/scetbon21a.html", "slides": "", "author_site": "Meyer Scetbon, Marco Cuturi, Gabriel Peyr\u00e9", "author": "Meyer Scetbon; Marco Cuturi; Gabriel Peyr\u00e9", "abstract": "Several recent applications of optimal transport (OT) theory to machine learning have relied on regularization, notably entropy and the Sinkhorn algorithm. 
Because matrix-vector products are pervasive in the Sinkhorn algorithm, several works have proposed to \\textit{approximate} kernel matrices appearing in its iterations using low-rank factors. Another route lies instead in imposing low-nonnegative rank constraints on the feasible set of couplings considered in OT problems, with no approximations on cost nor kernel matrices. This route was first explored by\u00a0\\citet{forrow2018statistical}, who proposed an algorithm tailored for the squared Euclidean ground cost, using a proxy objective that can be solved through the machinery of regularized 2-Wasserstein barycenters. Building on this, we introduce in this work a generic approach that aims at solving, in full generality, the OT problem under low-nonnegative rank constraints with arbitrary costs. Our algorithm relies on an explicit factorization of low-rank couplings as a product of \\textit{sub-coupling} factors linked by a common marginal; similar to an NMF approach, we alternatively updates these factors. We prove the non-asymptotic stationary convergence of this algorithm and illustrate its efficiency on benchmark experiments.", "bibtex": "@InProceedings{pmlr-v139-scetbon21a,\n title = \t {Low-Rank Sinkhorn Factorization},\n author = {Scetbon, Meyer and Cuturi, Marco and Peyr{\\'e}, Gabriel},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9344--9354},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/scetbon21a/scetbon21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/scetbon21a.html},\n abstract = \t {Several recent applications of optimal transport (OT) theory to machine learning have relied on regularization, notably entropy and the Sinkhorn algorithm. Because matrix-vector products are pervasive in the Sinkhorn algorithm, several works have proposed to \\textit{approximate} kernel matrices appearing in its iterations using low-rank factors. Another route lies instead in imposing low-nonnegative rank constraints on the feasible set of couplings considered in OT problems, with no approximations on cost nor kernel matrices. This route was first explored by\u00a0\\citet{forrow2018statistical}, who proposed an algorithm tailored for the squared Euclidean ground cost, using a proxy objective that can be solved through the machinery of regularized 2-Wasserstein barycenters. Building on this, we introduce in this work a generic approach that aims at solving, in full generality, the OT problem under low-nonnegative rank constraints with arbitrary costs. Our algorithm relies on an explicit factorization of low-rank couplings as a product of \\textit{sub-coupling} factors linked by a common marginal; similar to an NMF approach, we alternatively updates these factors. 
We prove the non-asymptotic stationary convergence of this algorithm and illustrate its efficiency on benchmark experiments.}\n}", "pdf": "http://proceedings.mlr.press/v139/scetbon21a/scetbon21a.pdf", "supp": "", "pdf_size": 3527362, "gs_citation": 77, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3196661915495161859&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "CREST, ENSAE; Google Brain; Ecole Normale Sup\u00e9rieure, PSL University + CNRS", "aff_domain": "ensae.fr;google.com;ens.fr", "email": "ensae.fr;google.com;ens.fr", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/scetbon21a.html", "aff_unique_index": "0;1;2+3", "aff_unique_norm": "CREST;Google;Ecole Normale Sup\u00e9rieure;Centre National de la Recherche Scientifique", "aff_unique_dep": ";Google Brain;;", "aff_unique_url": "https://www.crest.fr;https://brain.google.com;https://www.ens.fr;https://www.cnrs.fr", "aff_unique_abbr": "CREST;Google Brain;ENS;CNRS", "aff_campus_unique_index": "1;", "aff_campus_unique": ";Mountain View", "aff_country_unique_index": "0;1;0+0", "aff_country_unique": "France;United States" }, { "title": "Lower Bounds on Cross-Entropy Loss in the Presence of Test-time Adversaries", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9501", "id": "9501", "proceeding": "http://proceedings.mlr.press/v139/bhagoji21a.html", "slides": "/media/icml-2021/Slides/9501.pdf", "author_site": "Arjun Nitin Bhagoji, Daniel Cullina, Vikash Sehwag, Prateek Mittal", "author": "Arjun Nitin Bhagoji; Daniel Cullina; Vikash Sehwag; Prateek Mittal", "abstract": "Understanding the fundamental limits of robust supervised learning has emerged as a problem of immense interest, from both practical and theoretical standpoints. In particular, it is critical to determine classifier-agnostic bounds on the training loss to establish when learning is possible. In this paper, we determine optimal lower bounds on the cross-entropy loss in the presence of test-time adversaries, along with the corresponding optimal classification outputs. Our formulation of the bound as a solution to an optimization problem is general enough to encompass any loss function depending on soft classifier outputs. We also propose and provide a proof of correctness for a bespoke algorithm to compute this lower bound efficiently, allowing us to determine lower bounds for multiple practical datasets of interest. We use our lower bounds as a diagnostic tool to determine the effectiveness of current robust training methods and find a gap from optimality at larger budgets. 
Finally, we investigate the possibility of using of optimal classification outputs as soft labels to empirically improve robust training.", "bibtex": "@InProceedings{pmlr-v139-bhagoji21a,\n title = \t {Lower Bounds on Cross-Entropy Loss in the Presence of Test-time Adversaries},\n author = {Bhagoji, Arjun Nitin and Cullina, Daniel and Sehwag, Vikash and Mittal, Prateek},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {863--873},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bhagoji21a/bhagoji21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bhagoji21a.html},\n abstract = \t {Understanding the fundamental limits of robust supervised learning has emerged as a problem of immense interest, from both practical and theoretical standpoints. In particular, it is critical to determine classifier-agnostic bounds on the training loss to establish when learning is possible. In this paper, we determine optimal lower bounds on the cross-entropy loss in the presence of test-time adversaries, along with the corresponding optimal classification outputs. Our formulation of the bound as a solution to an optimization problem is general enough to encompass any loss function depending on soft classifier outputs. We also propose and provide a proof of correctness for a bespoke algorithm to compute this lower bound efficiently, allowing us to determine lower bounds for multiple practical datasets of interest. We use our lower bounds as a diagnostic tool to determine the effectiveness of current robust training methods and find a gap from optimality at larger budgets. 
Finally, we investigate the possibility of using of optimal classification outputs as soft labels to empirically improve robust training.}\n}", "pdf": "http://proceedings.mlr.press/v139/bhagoji21a/bhagoji21a.pdf", "supp": "", "pdf_size": 352385, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9078439186014463953&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Department of Computer Science, University of Chicago; Department of Electrical and Computer Engineering, Pennsylvania State University; Department of Electrical Engineering, Princeton University; Department of Electrical Engineering, Princeton University", "aff_domain": "uchicago.edu; ; ; ", "email": "uchicago.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/bhagoji21a.html", "aff_unique_index": "0;1;2;2", "aff_unique_norm": "University of Chicago;Pennsylvania State University;Princeton University", "aff_unique_dep": "Department of Computer Science;Department of Electrical and Computer Engineering;Department of Electrical Engineering", "aff_unique_url": "https://www.uchicago.edu;https://www.psu.edu;https://www.princeton.edu", "aff_unique_abbr": "UChicago;PSU;Princeton", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Lower-Bounded Proper Losses for Weakly Supervised Classification", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8643", "id": "8643", "proceeding": "http://proceedings.mlr.press/v139/yoshida21a.html", "slides": "", "author_site": "Shuhei M Yoshida, Takashi Takenouchi, Masashi Sugiyama", "author": "Shuhei M Yoshida; Takashi Takenouchi; Masashi Sugiyama", "abstract": "This paper discusses the problem of weakly supervised classification, in which instances are given weak labels that are produced by some label-corruption process. The goal is to derive conditions under which loss functions for weak-label learning are proper and lower-bounded\u2014two essential requirements for the losses used in class-probability estimation. To this end, we derive a representation theorem for proper losses in supervised learning, which dualizes the Savage representation. We use this theorem to characterize proper weak-label losses and find a condition for them to be lower-bounded. From these theoretical findings, we derive a novel regularization scheme called generalized logit squeezing, which makes any proper weak-label loss bounded from below, without losing properness. Furthermore, we experimentally demonstrate the effectiveness of our proposed approach, as compared to improper or unbounded losses. 
The results highlight the importance of properness and lower-boundedness.", "bibtex": "@InProceedings{pmlr-v139-yoshida21a,\n title = \t {Lower-Bounded Proper Losses for Weakly Supervised Classification},\n author = {Yoshida, Shuhei M and Takenouchi, Takashi and Sugiyama, Masashi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12110--12120},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yoshida21a/yoshida21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yoshida21a.html},\n abstract = \t {This paper discusses the problem of weakly supervised classification, in which instances are given weak labels that are produced by some label-corruption process. The goal is to derive conditions under which loss functions for weak-label learning are proper and lower-bounded\u2014two essential requirements for the losses used in class-probability estimation. To this end, we derive a representation theorem for proper losses in supervised learning, which dualizes the Savage representation. We use this theorem to characterize proper weak-label losses and find a condition for them to be lower-bounded. From these theoretical findings, we derive a novel regularization scheme called generalized logit squeezing, which makes any proper weak-label loss bounded from below, without losing properness. Furthermore, we experimentally demonstrate the effectiveness of our proposed approach, as compared to improper or unbounded losses. The results highlight the importance of properness and lower-boundedness.}\n}", "pdf": "http://proceedings.mlr.press/v139/yoshida21a/yoshida21a.pdf", "supp": "", "pdf_size": 1924428, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17541047076253957367&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Biometrics Research Laboratories, NEC Corporation, Kawasaki, Kanagawa, Japan+RIKEN Center for Advanced Intelligence Project, Chuo-ku, Tokyo, Japan+National Graduate Institute for Policy Studies, Minato-ku, Tokyo, Japan; RIKEN Center for Advanced Intelligence Project, Chuo-ku, Tokyo, Japan+National Graduate Institute for Policy Studies, Minato-ku, Tokyo, Japan+Department of Complexity Science and Engineering, The University of Tokyo, Kashiwa, Chiba, Japan; RIKEN Center for Advanced Intelligence Project, Chuo-ku, Tokyo, Japan+Department of Complexity Science and Engineering, The University of Tokyo, Kashiwa, Chiba, Japan", "aff_domain": "nec.com; ; ", "email": "nec.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/yoshida21a.html", "aff_unique_index": "0+1+2;1+2+3;1+3", "aff_unique_norm": "NEC Corporation;RIKEN Center for Advanced Intelligence Project;National Graduate Institute for Policy Studies;University of Tokyo", "aff_unique_dep": "Biometrics Research Laboratories;Center for Advanced Intelligence Project;;Department of Complexity Science and Engineering", "aff_unique_url": "https://www.nec.com;https://www.riken.jp/en/crai/;https://www.grIPS.ac.jp;https://www.u-tokyo.ac.jp", "aff_unique_abbr": "NEC;RIKEN;GRIPS;UTokyo", "aff_campus_unique_index": "0+1+1;1+1+2;1+2", "aff_campus_unique": "Kawasaki;Tokyo;Kashiwa", "aff_country_unique_index": "0+0+0;0+0+0;0+0", "aff_country_unique": "Japan" }, { "title": "MARINA: Faster Non-Convex Distributed Learning 
with Compression", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9657", "id": "9657", "proceeding": "http://proceedings.mlr.press/v139/gorbunov21a.html", "slides": "/media/icml-2021/Slides/9657.pdf", "author_site": "Eduard Gorbunov, Konstantin Burlachenko, Zhize Li, Peter Richtarik", "author": "Eduard Gorbunov; Konstantin P. Burlachenko; Zhize Li; Peter Richtarik", "abstract": "We develop and analyze MARINA: a new communication efficient method for non-convex distributed learning over heterogeneous datasets. MARINA employs a novel communication compression strategy based on the compression of gradient differences that is reminiscent of but different from the strategy employed in the DIANA method of Mishchenko et al. (2019). Unlike virtually all competing distributed first-order methods, including DIANA, ours is based on a carefully designed biased gradient estimator, which is the key to its superior theoretical and practical performance. The communication complexity bounds we prove for MARINA are evidently better than those of all previous first-order methods. Further, we develop and analyze two variants of MARINA: VR-MARINA and PP-MARINA. The first method is designed for the case when the local loss functions owned by clients are either of a finite sum or of an expectation form, and the second method allows for a partial participation of clients {\u2013} a feature important in federated learning. All our methods are superior to previous state-of-the-art methods in terms of oracle/communication complexity. Finally, we provide a convergence analysis of all methods for problems satisfying the Polyak-{\u0141}ojasiewicz condition.", "bibtex": "@InProceedings{pmlr-v139-gorbunov21a,\n title = \t {MARINA: Faster Non-Convex Distributed Learning with Compression},\n author = {Gorbunov, Eduard and Burlachenko, Konstantin P. and Li, Zhize and Richtarik, Peter},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3788--3798},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gorbunov21a/gorbunov21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/gorbunov21a.html},\n abstract = \t {We develop and analyze MARINA: a new communication efficient method for non-convex distributed learning over heterogeneous datasets. MARINA employs a novel communication compression strategy based on the compression of gradient differences that is reminiscent of but different from the strategy employed in the DIANA method of Mishchenko et al. (2019). Unlike virtually all competing distributed first-order methods, including DIANA, ours is based on a carefully designed biased gradient estimator, which is the key to its superior theoretical and practical performance. The communication complexity bounds we prove for MARINA are evidently better than those of all previous first-order methods. Further, we develop and analyze two variants of MARINA: VR-MARINA and PP-MARINA. The first method is designed for the case when the local loss functions owned by clients are either of a finite sum or of an expectation form, and the second method allows for a partial participation of clients {\u2013} a feature important in federated learning. All our methods are superior to previous state-of-the-art methods in terms of oracle/communication complexity. 
Finally, we provide a convergence analysis of all methods for problems satisfying the Polyak-{\u0141}ojasiewicz condition.}\n}", "pdf": "http://proceedings.mlr.press/v139/gorbunov21a/gorbunov21a.pdf", "supp": "", "pdf_size": 1495526, "gs_citation": 145, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6014843650767988680&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "Moscow Institute of Physics and Technology, Moscow, Russia+Yandex, Moscow, Russia+King Abdullah University of Science and Technology, Thuwal, Saudi Arabia; King Abdullah University of Science and Technology, Thuwal, Saudi Arabia; King Abdullah University of Science and Technology, Thuwal, Saudi Arabia; King Abdullah University of Science and Technology, Thuwal, Saudi Arabia", "aff_domain": "phystech.edu; ; ;kaust.edu.sa", "email": "phystech.edu; ; ;kaust.edu.sa", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/gorbunov21a.html", "aff_unique_index": "0+1+2;2;2;2", "aff_unique_norm": "Moscow Institute of Physics and Technology;Yandex;King Abdullah University of Science and Technology", "aff_unique_dep": ";;", "aff_unique_url": "https://www.mipt.ru/en;https://yandex.com;https://www.kast.kau.edu.sa", "aff_unique_abbr": "MIPT;Yandex;KAUST", "aff_campus_unique_index": "0+0+1;1;1;1", "aff_campus_unique": "Moscow;Thuwal", "aff_country_unique_index": "0+0+1;1;1;1", "aff_country_unique": "Russian Federation;Saudi Arabia" }, { "title": "MC-LSTM: Mass-Conserving LSTM", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10425", "id": "10425", "proceeding": "http://proceedings.mlr.press/v139/hoedt21a.html", "slides": "", "author_site": "Pieter-Jan Hoedt, Frederik Kratzert, Daniel Klotz, Christina Halmich, Markus Holzleitner, Grey Nearing, Sepp Hochreiter, G\u00fcnter Klambauer", "author": "Pieter-Jan Hoedt; Frederik Kratzert; Daniel Klotz; Christina Halmich; Markus Holzleitner; Grey S Nearing; Sepp Hochreiter; Guenter Klambauer", "abstract": "The success of Convolutional Neural Networks (CNNs) in computer vision is mainly driven by their strong inductive bias, which is strong enough to allow CNNs to solve vision-related tasks with random weights, meaning without learning. Similarly, Long Short-Term Memory (LSTM) has a strong inductive bias towards storing information over time. However, many real-world systems are governed by conservation laws, which lead to the redistribution of particular quantities {\u2014} e.g.in physical and economical systems. Our novel Mass-Conserving LSTM (MC-LSTM) adheres to these conservation laws by extending the inductive bias of LSTM to model the redistribution of those stored quantities. MC-LSTMs set a new state-of-the-art for neural arithmetic units at learning arithmetic operations, such as addition tasks,which have a strong conservation law, as the sum is constant over time. Further, MC-LSTM is applied to traffic forecasting, modeling a pendulum, and a large benchmark dataset in hydrology, where it sets a new state-of-the-art for predicting peak flows. 
In the hydrology example, we show that MC-LSTM states correlate with real world processes and are therefore interpretable.", "bibtex": "@InProceedings{pmlr-v139-hoedt21a,\n title = \t {MC-LSTM: Mass-Conserving LSTM},\n author = {Hoedt, Pieter-Jan and Kratzert, Frederik and Klotz, Daniel and Halmich, Christina and Holzleitner, Markus and Nearing, Grey S and Hochreiter, Sepp and Klambauer, Guenter},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4275--4286},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hoedt21a/hoedt21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hoedt21a.html},\n abstract = \t {The success of Convolutional Neural Networks (CNNs) in computer vision is mainly driven by their strong inductive bias, which is strong enough to allow CNNs to solve vision-related tasks with random weights, meaning without learning. Similarly, Long Short-Term Memory (LSTM) has a strong inductive bias towards storing information over time. However, many real-world systems are governed by conservation laws, which lead to the redistribution of particular quantities {\u2014} e.g.in physical and economical systems. Our novel Mass-Conserving LSTM (MC-LSTM) adheres to these conservation laws by extending the inductive bias of LSTM to model the redistribution of those stored quantities. MC-LSTMs set a new state-of-the-art for neural arithmetic units at learning arithmetic operations, such as addition tasks,which have a strong conservation law, as the sum is constant over time. Further, MC-LSTM is applied to traffic forecasting, modeling a pendulum, and a large benchmark dataset in hydrology, where it sets a new state-of-the-art for predicting peak flows. 
In the hydrology example, we show that MC-LSTM states correlate with real world processes and are therefore interpretable.}\n}", "pdf": "http://proceedings.mlr.press/v139/hoedt21a/hoedt21a.pdf", "supp": "", "pdf_size": 1619234, "gs_citation": 107, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4541460761992496905&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "ELLIS Unit Linz, LIT AI Lab, Institute for Machine Learning, Johannes Kepler University Linz, Austria; ELLIS Unit Linz, LIT AI Lab, Institute for Machine Learning, Johannes Kepler University Linz, Austria; ELLIS Unit Linz, LIT AI Lab, Institute for Machine Learning, Johannes Kepler University Linz, Austria; ELLIS Unit Linz, LIT AI Lab, Institute for Machine Learning, Johannes Kepler University Linz, Austria; ELLIS Unit Linz, LIT AI Lab, Institute for Machine Learning, Johannes Kepler University Linz, Austria; Google Research, Mountain View, CA, USA; ELLIS Unit Linz, LIT AI Lab, Institute for Machine Learning, Johannes Kepler University Linz, Austria + Institute of Advanced Research in Artificial Intelligence (IARAI); ELLIS Unit Linz, LIT AI Lab, Institute for Machine Learning, Johannes Kepler University Linz, Austria", "aff_domain": "ml.jku.at;ml.jku.at; ; ; ; ; ; ", "email": "ml.jku.at;ml.jku.at; ; ; ; ; ; ", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/hoedt21a.html", "aff_unique_index": "0;0;0;0;0;1;0+2;0", "aff_unique_norm": "Johannes Kepler University Linz;Google;Institute of Advanced Research in Artificial Intelligence", "aff_unique_dep": "Institute for Machine Learning;Google Research;", "aff_unique_url": "https://www.jku.at;https://research.google;https://www.ia-rai.at", "aff_unique_abbr": "JKU;Google;IARAI", "aff_campus_unique_index": "0;0;0;0;0;1;0;0", "aff_campus_unique": "Linz;Mountain View;", "aff_country_unique_index": "0;0;0;0;0;1;0+0;0", "aff_country_unique": "Austria;United States" }, { "title": "MOTS: Minimax Optimal Thompson Sampling", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9037", "id": "9037", "proceeding": "http://proceedings.mlr.press/v139/jin21d.html", "slides": "/media/icml-2021/Slides/9037.pdf", "author_site": "Tianyuan Jin, Pan Xu, Jieming Shi, Xiaokui Xiao, Quanquan Gu", "author": "Tianyuan Jin; Pan Xu; Jieming Shi; Xiaokui Xiao; Quanquan Gu", "abstract": "Thompson sampling is one of the most widely used algorithms in many online decision problems due to its simplicity for implementation and superior empirical performance over other state-of-the-art methods. Despite its popularity and empirical success, it has remained an open problem whether Thompson sampling can achieve the minimax optimal regret O(\\sqrt{TK}) for K-armed bandit problems, where T is the total time horizon. In this paper we fill this long open gap by proposing a new Thompson sampling algorithm called MOTS that adaptively truncates the sampling result of the chosen arm at each time step. We prove that this simple variant of Thompson sampling achieves the minimax optimal regret bound O(\\sqrt{TK}) for finite time horizon T and also the asymptotic optimal regret bound when $T$ grows to infinity as well. 
This is the first time that the minimax optimality of multi-armed bandit problems has been attained by Thompson sampling type of algorithms.", "bibtex": "@InProceedings{pmlr-v139-jin21d,\n title = \t {MOTS: Minimax Optimal Thompson Sampling},\n author = {Jin, Tianyuan and Xu, Pan and Shi, Jieming and Xiao, Xiaokui and Gu, Quanquan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5074--5083},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jin21d/jin21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/jin21d.html},\n abstract = \t {Thompson sampling is one of the most widely used algorithms in many online decision problems due to its simplicity for implementation and superior empirical performance over other state-of-the-art methods. Despite its popularity and empirical success, it has remained an open problem whether Thompson sampling can achieve the minimax optimal regret O(\\sqrt{TK}) for K-armed bandit problems, where T is the total time horizon. In this paper we fill this long open gap by proposing a new Thompson sampling algorithm called MOTS that adaptively truncates the sampling result of the chosen arm at each time step. We prove that this simple variant of Thompson sampling achieves the minimax optimal regret bound O(\\sqrt{TK}) for finite time horizon T and also the asymptotic optimal regret bound when $T$ grows to infinity as well. This is the first time that the minimax optimality of multi-armed bandit problems has been attained by Thompson sampling type of algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v139/jin21d/jin21d.pdf", "supp": "", "pdf_size": 3136214, "gs_citation": 46, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5114694492533648717&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "School of Computing, National University of Singapore, Singapore; Department of Computer Science, University of California, Los Angeles, USA; Department of Computing, The Hong Kong Polytechnic University, Hong Kong; School of Computing, National University of Singapore, Singapore; Department of Computer Science, University of California, Los Angeles, USA", "aff_domain": "nus.edu.sg;cs.ucla.edu; ; ;", "email": "nus.edu.sg;cs.ucla.edu; ; ;", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/jin21d.html", "aff_unique_index": "0;1;2;0;1", "aff_unique_norm": "National University of Singapore;University of California, Los Angeles;Hong Kong Polytechnic University", "aff_unique_dep": "School of Computing;Department of Computer Science;Department of Computing", "aff_unique_url": "https://www.nus.edu.sg;https://www.ucla.edu;https://www.polyu.edu.hk", "aff_unique_abbr": "NUS;UCLA;PolyU", "aff_campus_unique_index": "1;2;1", "aff_campus_unique": ";Los Angeles;Hong Kong SAR", "aff_country_unique_index": "0;1;2;0;1", "aff_country_unique": "Singapore;United States;China" }, { "title": "MSA Transformer", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9905", "id": "9905", "proceeding": "http://proceedings.mlr.press/v139/rao21a.html", "slides": "", "author_site": "Roshan Rao, Jason Liu, Robert Verkuil, Joshua Meier, John Canny, Pieter Abbeel, Tom Sercu, Alexander Rives", "author": "Roshan M Rao; Jason Liu; Robert Verkuil; Joshua Meier; John Canny; 
Pieter Abbeel; Tom Sercu; Alexander Rives", "abstract": "Unsupervised protein language models trained across millions of diverse sequences learn structure and function of proteins. Protein language models studied to date have been trained to perform inference from individual sequences. The longstanding approach in computational biology has been to make inferences from a family of evolutionarily related sequences by fitting a model to each family independently. In this work we combine the two paradigms. We introduce a protein language model which takes as input a set of sequences in the form of a multiple sequence alignment. The model interleaves row and column attention across the input sequences and is trained with a variant of the masked language modeling objective across many protein families. The performance of the model surpasses current state-of-the-art unsupervised structure learning methods by a wide margin, with far greater parameter efficiency than prior state-of-the-art protein language models.", "bibtex": "@InProceedings{pmlr-v139-rao21a,\n title = \t {MSA Transformer},\n author = {Rao, Roshan M and Liu, Jason and Verkuil, Robert and Meier, Joshua and Canny, John and Abbeel, Pieter and Sercu, Tom and Rives, Alexander},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8844--8856},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/rao21a/rao21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/rao21a.html},\n abstract = \t {Unsupervised protein language models trained across millions of diverse sequences learn structure and function of proteins. Protein language models studied to date have been trained to perform inference from individual sequences. The longstanding approach in computational biology has been to make inferences from a family of evolutionarily related sequences by fitting a model to each family independently. In this work we combine the two paradigms. We introduce a protein language model which takes as input a set of sequences in the form of a multiple sequence alignment. The model interleaves row and column attention across the input sequences and is trained with a variant of the masked language modeling objective across many protein families. 
The performance of the model surpasses current state-of-the-art unsupervised structure learning methods by a wide margin, with far greater parameter efficiency than prior state-of-the-art protein language models.}\n}", "pdf": "http://proceedings.mlr.press/v139/rao21a/rao21a.pdf", "supp": "", "pdf_size": 3437208, "gs_citation": 754, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14297326050201291557&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "UC Berkeley + Work performed during internship at FAIR; Facebook AI Research; Facebook AI Research; Facebook AI Research; UC Berkeley; UC Berkeley; Facebook AI Research; Facebook AI Research + New York University", "aff_domain": "berkeley.edu; ; ; ; ; ; ;fb.com", "email": "berkeley.edu; ; ; ; ; ; ;fb.com", "github": "https://github.com/facebookresearch/esm", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/rao21a.html", "aff_unique_index": "0+1;1;1;1;0;0;1;1+2", "aff_unique_norm": "University of California, Berkeley;Meta;New York University", "aff_unique_dep": ";Facebook AI Research;", "aff_unique_url": "https://www.berkeley.edu;https://ai.facebook.com;https://www.nyu.edu", "aff_unique_abbr": "UC Berkeley;FAIR;NYU", "aff_campus_unique_index": "0;0;0;", "aff_campus_unique": "Berkeley;", "aff_country_unique_index": "0+0;0;0;0;0;0;0;0+0", "aff_country_unique": "United States" }, { "title": "MURAL: Meta-Learning Uncertainty-Aware Rewards for Outcome-Driven Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8943", "id": "8943", "proceeding": "http://proceedings.mlr.press/v139/li21g.html", "slides": "/media/icml-2021/Slides/8943.pdf", "author_site": "Kevin Li, Abhishek Gupta, Ashwin D Reddy, Vitchyr Pong, Aurick Zhou, Justin Yu, Sergey Levine", "author": "Kevin Li; Abhishek Gupta; Ashwin Reddy; Vitchyr H Pong; Aurick Zhou; Justin Yu; Sergey Levine", "abstract": "Exploration in reinforcement learning is, in general, a challenging problem. A common technique to make learning easier is providing demonstrations from a human supervisor, but such demonstrations can be expensive and time-consuming to acquire. In this work, we study a more tractable class of reinforcement learning problems defined simply by examples of successful outcome states, which can be much easier to provide while still making the exploration problem more tractable. In this problem setting, the reward function can be obtained automatically by training a classifier to categorize states as successful or not. However, as we will show, this requires the classifier to make uncertainty-aware predictions that are very difficult using standard techniques for training deep networks. To address this, we propose a novel mechanism for obtaining calibrated uncertainty based on an amortized technique for computing the normalized maximum likelihood (NML) distribution, leveraging tools from meta-learning to make this distribution tractable. We show that the resulting algorithm has a number of intriguing connections to both count-based exploration methods and prior algorithms for learning reward functions, while also providing more effective guidance towards the goal. 
We demonstrate that our algorithm solves a number of challenging navigation and robotic manipulation tasks which prove difficult or impossible for prior methods.", "bibtex": "@InProceedings{pmlr-v139-li21g,\n title = \t {MURAL: Meta-Learning Uncertainty-Aware Rewards for Outcome-Driven Reinforcement Learning},\n author = {Li, Kevin and Gupta, Abhishek and Reddy, Ashwin and Pong, Vitchyr H and Zhou, Aurick and Yu, Justin and Levine, Sergey},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6346--6356},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21g/li21g.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21g.html},\n abstract = \t {Exploration in reinforcement learning is, in general, a challenging problem. A common technique to make learning easier is providing demonstrations from a human supervisor, but such demonstrations can be expensive and time-consuming to acquire. In this work, we study a more tractable class of reinforcement learning problems defined simply by examples of successful outcome states, which can be much easier to provide while still making the exploration problem more tractable. In this problem setting, the reward function can be obtained automatically by training a classifier to categorize states as successful or not. However, as we will show, this requires the classifier to make uncertainty-aware predictions that are very difficult using standard techniques for training deep networks. To address this, we propose a novel mechanism for obtaining calibrated uncertainty based on an amortized technique for computing the normalized maximum likelihood (NML) distribution, leveraging tools from meta-learning to make this distribution tractable. We show that the resulting algorithm has a number of intriguing connections to both count-based exploration methods and prior algorithms for learning reward functions, while also providing more effective guidance towards the goal. 
We demonstrate that our algorithm solves a number of challenging navigation and robotic manipulation tasks which prove difficult or impossible for prior methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21g/li21g.pdf", "supp": "", "pdf_size": 4708597, "gs_citation": 46, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11269871123445903519&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Electrical Engineering and Computer Sciences, UC Berkeley; Department of Electrical Engineering and Computer Sciences, UC Berkeley; Department of Electrical Engineering and Computer Sciences, UC Berkeley; Department of Electrical Engineering and Computer Sciences, UC Berkeley; Department of Electrical Engineering and Computer Sciences, UC Berkeley; Department of Electrical Engineering and Computer Sciences, UC Berkeley; Department of Electrical Engineering and Computer Sciences, UC Berkeley", "aff_domain": "berkeley.edu;berkeley.edu; ; ; ; ; ", "email": "berkeley.edu;berkeley.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/li21g.html", "aff_unique_index": "0;0;0;0;0;0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Department of Electrical Engineering and Computer Sciences", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0;0;0;0;0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Machine Unlearning for Random Forests", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10523", "id": "10523", "proceeding": "http://proceedings.mlr.press/v139/brophy21a.html", "slides": "/media/icml-2021/Slides/10523_jA4THbK.pdf", "author_site": "Jonathan Brophy, Daniel Lowd", "author": "Jonathan Brophy; Daniel Lowd", "abstract": "Responding to user data deletion requests, removing noisy examples, or deleting corrupted training data are just a few reasons for wanting to delete instances from a machine learning (ML) model. However, efficiently removing this data from an ML model is generally difficult. In this paper, we introduce data removal-enabled (DaRE) forests, a variant of random forests that enables the removal of training data with minimal retraining. Model updates for each DaRE tree in the forest are exact, meaning that removing instances from a DaRE model yields exactly the same model as retraining from scratch on updated data. DaRE trees use randomness and caching to make data deletion efficient. The upper levels of DaRE trees use random nodes, which choose split attributes and thresholds uniformly at random. These nodes rarely require updates because they only minimally depend on the data. At the lower levels, splits are chosen to greedily optimize a split criterion such as Gini index or mutual information. DaRE trees cache statistics at each node and training data at each leaf, so that only the necessary subtrees are updated as data is removed. For numerical attributes, greedy nodes optimize over a random subset of thresholds, so that they can maintain statistics while approximating the optimal threshold. By adjusting the number of thresholds considered for greedy nodes, and the number of random nodes, DaRE trees can trade off between more accurate predictions and more efficient updates. 
In experiments on 13 real-world datasets and one synthetic dataset, we find DaRE forests delete data orders of magnitude faster than retraining from scratch while sacrificing little to no predictive power.", "bibtex": "@InProceedings{pmlr-v139-brophy21a,\n title = \t {Machine Unlearning for Random Forests},\n author = {Brophy, Jonathan and Lowd, Daniel},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1092--1104},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/brophy21a/brophy21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/brophy21a.html},\n abstract = \t {Responding to user data deletion requests, removing noisy examples, or deleting corrupted training data are just a few reasons for wanting to delete instances from a machine learning (ML) model. However, efficiently removing this data from an ML model is generally difficult. In this paper, we introduce data removal-enabled (DaRE) forests, a variant of random forests that enables the removal of training data with minimal retraining. Model updates for each DaRE tree in the forest are exact, meaning that removing instances from a DaRE model yields exactly the same model as retraining from scratch on updated data. DaRE trees use randomness and caching to make data deletion efficient. The upper levels of DaRE trees use random nodes, which choose split attributes and thresholds uniformly at random. These nodes rarely require updates because they only minimally depend on the data. At the lower levels, splits are chosen to greedily optimize a split criterion such as Gini index or mutual information. DaRE trees cache statistics at each node and training data at each leaf, so that only the necessary subtrees are updated as data is removed. For numerical attributes, greedy nodes optimize over a random subset of thresholds, so that they can maintain statistics while approximating the optimal threshold. By adjusting the number of thresholds considered for greedy nodes, and the number of random nodes, DaRE trees can trade off between more accurate predictions and more efficient updates. 
In experiments on 13 real-world datasets and one synthetic dataset, we find DaRE forests delete data orders of magnitude faster than retraining from scratch while sacrificing little to no predictive power.}\n}", "pdf": "http://proceedings.mlr.press/v139/brophy21a/brophy21a.pdf", "supp": "", "pdf_size": 1296139, "gs_citation": 207, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14941377472000813584&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Computer and Information Science, University of Oregon, Eugene, Oregon; Department of Computer and Information Science, University of Oregon, Eugene, Oregon", "aff_domain": "cs.uoregon.edu; ", "email": "cs.uoregon.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/brophy21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Oregon", "aff_unique_dep": "Department of Computer and Information Science", "aff_unique_url": "https://www.uoregon.edu", "aff_unique_abbr": "UO", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Eugene", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Making Paper Reviewing Robust to Bid Manipulation Attacks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10609", "id": "10609", "proceeding": "http://proceedings.mlr.press/v139/wu21b.html", "slides": "", "author_site": "Ruihan Wu, Chuan Guo, Felix Wu, Rahul Kidambi, Laurens van der Maaten, Kilian Weinberger", "author": "Ruihan Wu; Chuan Guo; Felix Wu; Rahul Kidambi; Laurens Van Der Maaten; Kilian Weinberger", "abstract": "Most computer science conferences rely on paper bidding to assign reviewers to papers. Although paper bidding enables high-quality assignments in days of unprecedented submission numbers, it also opens the door for dishonest reviewers to adversarially influence paper reviewing assignments. Anecdotal evidence suggests that some reviewers bid on papers by \"friends\" or colluding authors, even though these papers are outside their area of expertise, and recommend them for acceptance without considering the merit of the work. In this paper, we study the efficacy of such bid manipulation attacks and find that, indeed, they can jeopardize the integrity of the review process. We develop a novel approach for paper bidding and assignment that is much more robust against such attacks. We show empirically that our approach provides robustness even when dishonest reviewers collude, have full knowledge of the assignment system\u2019s internal workings, and have access to the system\u2019s inputs. 
In addition to being more robust, the quality of our paper review assignments is comparable to that of current, non-robust assignment approaches.", "bibtex": "@InProceedings{pmlr-v139-wu21b,\n title = \t {Making Paper Reviewing Robust to Bid Manipulation Attacks},\n author = {Wu, Ruihan and Guo, Chuan and Wu, Felix and Kidambi, Rahul and Van Der Maaten, Laurens and Weinberger, Kilian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11240--11250},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wu21b/wu21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/wu21b.html},\n abstract = \t {Most computer science conferences rely on paper bidding to assign reviewers to papers. Although paper bidding enables high-quality assignments in days of unprecedented submission numbers, it also opens the door for dishonest reviewers to adversarially influence paper reviewing assignments. Anecdotal evidence suggests that some reviewers bid on papers by \"friends\" or colluding authors, even though these papers are outside their area of expertise, and recommend them for acceptance without considering the merit of the work. In this paper, we study the efficacy of such bid manipulation attacks and find that, indeed, they can jeopardize the integrity of the review process. We develop a novel approach for paper bidding and assignment that is much more robust against such attacks. We show empirically that our approach provides robustness even when dishonest reviewers collude, have full knowledge of the assignment system\u2019s internal workings, and have access to the system\u2019s inputs. 
In addition to being more robust, the quality of our paper review assignments is comparable to that of current, non-robust assignment approaches.}\n}", "pdf": "http://proceedings.mlr.press/v139/wu21b/wu21b.pdf", "supp": "", "pdf_size": 1539103, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3106264104832629742&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science, Cornell University; Facebook AI Research; ASAPP; Amazon Search & AI; Facebook AI Research; Department of Computer Science, Cornell University", "aff_domain": "cornell.edu;fb.com; ; ; ; ", "email": "cornell.edu;fb.com; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/wu21b.html", "aff_unique_index": "0;1;2;3;1;0", "aff_unique_norm": "Cornell University;Meta;ASAPP;Amazon", "aff_unique_dep": "Department of Computer Science;Facebook AI Research;;Search & AI", "aff_unique_url": "https://www.cornell.edu;https://research.facebook.com;https://www.asapp.com;https://www.amazon.com", "aff_unique_abbr": "Cornell;FAIR;ASAPP;Amazon", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Making transport more robust and interpretable by moving data through a small number of anchor points", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10355", "id": "10355", "proceeding": "http://proceedings.mlr.press/v139/lin21a.html", "slides": "", "author_site": "Chi-Heng Lin, Mehdi Azabou, Eva Dyer", "author": "Chi-Heng Lin; Mehdi Azabou; Eva Dyer", "abstract": "Optimal transport (OT) is a widely used technique for distribution alignment, with applications throughout the machine learning, graphics, and vision communities. Without any additional structural assumptions on transport, however, OT can be fragile to outliers or noise, especially in high dimensions. Here, we introduce Latent Optimal Transport (LOT), a new approach for OT that simultaneously learns low-dimensional structure in data while leveraging this structure to solve the alignment task. The idea behind our approach is to learn two sets of \u201canchors\u201d that constrain the flow of transport between a source and target distribution. In both theoretical and empirical studies, we show that LOT regularizes the rank of transport and makes it more robust to outliers and the sampling density. 
We show that by allowing the source and target to have different anchors, and using LOT to align the latent spaces between anchors, the resulting transport plan has better structural interpretability and highlights connections between both the individual data points and the local geometry of the datasets.", "bibtex": "@InProceedings{pmlr-v139-lin21a,\n title = \t {Making transport more robust and interpretable by moving data through a small number of anchor points},\n author = {Lin, Chi-Heng and Azabou, Mehdi and Dyer, Eva},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6631--6641},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lin21a/lin21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lin21a.html},\n abstract = \t {Optimal transport (OT) is a widely used technique for distribution alignment, with applications throughout the machine learning, graphics, and vision communities. Without any additional structural assumptions on transport, however, OT can be fragile to outliers or noise, especially in high dimensions. Here, we introduce Latent Optimal Transport (LOT), a new approach for OT that simultaneously learns low-dimensional structure in data while leveraging this structure to solve the alignment task. The idea behind our approach is to learn two sets of \u201canchors\u201d that constrain the flow of transport between a source and target distribution. In both theoretical and empirical studies, we show that LOT regularizes the rank of transport and makes it more robust to outliers and the sampling density. 
We show that by allowing the source and target to have different anchors, and using LOT to align the latent spaces between anchors, the resulting transport plan has better structural interpretability and highlights connections between both the individual data points and the local geometry of the datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/lin21a/lin21a.pdf", "supp": "", "pdf_size": 9766908, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14045713528225441550&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Department of Electrical and Computer Engineering, Georgia Tech, Atlanta, Georgia, USA+Machine Learning Program, Georgia Tech, Atlanta, Georgia, USA+Coulter Department of Biomedical Engineering, Georgia Tech & Emory University, Atlanta, Georgia, USA; Department of Electrical and Computer Engineering, Georgia Tech, Atlanta, Georgia, USA+Machine Learning Program, Georgia Tech, Atlanta, Georgia, USA; Department of Electrical and Computer Engineering, Georgia Tech, Atlanta, Georgia, USA+Machine Learning Program, Georgia Tech, Atlanta, Georgia, USA+Coulter Department of Biomedical Engineering, Georgia Tech & Emory University, Atlanta, Georgia, USA", "aff_domain": "gatech.edu;gatech.edu;gatech.edu", "email": "gatech.edu;gatech.edu;gatech.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/lin21a.html", "aff_unique_index": "0+1+2;0+1;0+1+2", "aff_unique_norm": "Georgia Tech;Georgia Institute of Technology;Georgia Tech & Emory University", "aff_unique_dep": "Department of Electrical and Computer Engineering;Machine Learning Program;Coulter Department of Biomedical Engineering", "aff_unique_url": "https://www.gatech.edu;https://www.gatech.edu;https://www.gatech.edu, https://www.emory.edu", "aff_unique_abbr": "GT;Georgia Tech;Georgia Tech & Emory", "aff_campus_unique_index": "0+0+0;0+0;0+0+0", "aff_campus_unique": "Atlanta", "aff_country_unique_index": "0+0+0;0+0;0+0+0", "aff_country_unique": "United States" }, { "title": "Mandoline: Model Evaluation under Distribution Shift", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9313", "id": "9313", "proceeding": "http://proceedings.mlr.press/v139/chen21i.html", "slides": "/media/icml-2021/Slides/9313.pdf", "author_site": "Mayee Chen, Karan Goel, Nimit Sohoni, Fait Poms, Kayvon Fatahalian, Christopher Re", "author": "Mayee Chen; Karan Goel; Nimit S Sohoni; Fait Poms; Kayvon Fatahalian; Christopher Re", "abstract": "Machine learning models are often deployed in different settings than they were trained and validated on, posing a challenge to practitioners who wish to predict how well the deployed model will perform on a target distribution. If an unlabeled sample from the target distribution is available, along with a labeled sample from a possibly different source distribution, standard approaches such as importance weighting can be applied to estimate performance on the target. However, importance weighting struggles when the source and target distributions have non-overlapping support or are high-dimensional. Taking inspiration from fields such as epidemiology and polling, we develop Mandoline, a new evaluation framework that mitigates these issues. Our key insight is that practitioners may have prior knowledge about the ways in which the distribution shifts, which we can use to better guide the importance weighting procedure. 
Specifically, users write simple \"slicing functions\" {\u2013} noisy, potentially correlated binary functions intended to capture possible axes of distribution shift {\u2013} to compute reweighted performance estimates. We further describe a density ratio estimation framework for the slices and show how its estimation error scales with slice quality and dataset size. Empirical validation on NLP and vision tasks shows that Mandoline can estimate performance on the target distribution up to 3x more accurately compared to standard baselines.", "bibtex": "@InProceedings{pmlr-v139-chen21i,\n title = \t {Mandoline: Model Evaluation under Distribution Shift},\n author = {Chen, Mayee and Goel, Karan and Sohoni, Nimit S and Poms, Fait and Fatahalian, Kayvon and Re, Christopher},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1617--1629},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21i/chen21i.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21i.html},\n abstract = \t {Machine learning models are often deployed in different settings than they were trained and validated on, posing a challenge to practitioners who wish to predict how well the deployed model will perform on a target distribution. If an unlabeled sample from the target distribution is available, along with a labeled sample from a possibly different source distribution, standard approaches such as importance weighting can be applied to estimate performance on the target. However, importance weighting struggles when the source and target distributions have non-overlapping support or are high-dimensional. Taking inspiration from fields such as epidemiology and polling, we develop Mandoline, a new evaluation framework that mitigates these issues. Our key insight is that practitioners may have prior knowledge about the ways in which the distribution shifts, which we can use to better guide the importance weighting procedure. Specifically, users write simple \"slicing functions\" {\u2013} noisy, potentially correlated binary functions intended to capture possible axes of distribution shift {\u2013} to compute reweighted performance estimates. We further describe a density ratio estimation framework for the slices and show how its estimation error scales with slice quality and dataset size. 
Empirical validation on NLP and vision tasks shows that Mandoline can estimate performance on the target distribution up to 3x more accurately compared to standard baselines.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21i/chen21i.pdf", "supp": "", "pdf_size": 689357, "gs_citation": 100, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3421066091815040064&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Institute for Computational and Mathematical Engineering, Stanford University; Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Department of Computer Science, Stanford University", "aff_domain": "stanford.edu;cs.stanford.edu;stanford.edu; ; ; ", "email": "stanford.edu;cs.stanford.edu;stanford.edu; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/chen21i.html", "aff_unique_index": "0;0;0;0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0;0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Marginal Contribution Feature Importance - an Axiomatic Approach for Explaining Data", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10695", "id": "10695", "proceeding": "http://proceedings.mlr.press/v139/catav21a.html", "slides": "", "author_site": "Amnon Catav, Boyang Fu, Yazeed Zoabi, Ahuva Weiss Meilik, Noam Shomron, Jason Ernst, Sriram Sankararaman, Ran Gilad-Bachrach", "author": "Amnon Catav; Boyang Fu; Yazeed Zoabi; Ahuva Libi Weiss Meilik; Noam Shomron; Jason Ernst; Sriram Sankararaman; Ran Gilad-Bachrach", "abstract": "In recent years, methods were proposed for assigning feature importance scores to measure the contribution of individual features. While in some cases the goal is to understand a specific model, in many cases the goal is to understand the contribution of certain properties (features) to a real-world phenomenon. Thus, a distinction has been made between feature importance scores that explain a model and scores that explain the data. When explaining the data, machine learning models are used as proxies in settings where conducting many real-world experiments is expensive or prohibited. While existing feature importance scores show great success in explaining models, we demonstrate their limitations when explaining the data, especially in the presence of correlations between features. Therefore, we develop a set of axioms to capture properties expected from a feature importance score when explaining data and prove that there exists only one score that satisfies all of them, the Marginal Contribution Feature Importance (MCI). 
We analyze the theoretical properties of this score function and demonstrate its merits empirically.", "bibtex": "@InProceedings{pmlr-v139-catav21a,\n title = \t {Marginal Contribution Feature Importance - an Axiomatic Approach for Explaining Data},\n author = {Catav, Amnon and Fu, Boyang and Zoabi, Yazeed and Meilik, Ahuva Libi Weiss and Shomron, Noam and Ernst, Jason and Sankararaman, Sriram and Gilad-Bachrach, Ran},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1324--1335},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/catav21a/catav21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/catav21a.html},\n abstract = \t {In recent years, methods were proposed for assigning feature importance scores to measure the contribution of individual features. While in some cases the goal is to understand a specific model, in many cases the goal is to understand the contribution of certain properties (features) to a real-world phenomenon. Thus, a distinction has been made between feature importance scores that explain a model and scores that explain the data. When explaining the data, machine learning models are used as proxies in settings where conducting many real-world experiments is expensive or prohibited. While existing feature importance scores show great success in explaining models, we demonstrate their limitations when explaining the data, especially in the presence of correlations between features. Therefore, we develop a set of axioms to capture properties expected from a feature importance score when explaining data and prove that there exists only one score that satisfies all of them, the Marginal Contribution Feature Importance (MCI). We analyze the theoretical properties of this score function and demonstrate its merits empirically.}\n}", "pdf": "http://proceedings.mlr.press/v139/catav21a/catav21a.pdf", "supp": "", "pdf_size": 1068238, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3319698669337015414&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 8, "aff": ";;;;;;;", "aff_domain": ";;;;;;;", "email": ";;;;;;;", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/catav21a.html" }, { "title": "Marginalized Stochastic Natural Gradients for Black-Box Variational Inference", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9853", "id": "9853", "proceeding": "http://proceedings.mlr.press/v139/ji21b.html", "slides": "/media/icml-2021/Slides/9853.pdf", "author_site": "Geng Ji, Debora Sujono, Erik Sudderth", "author": "Geng Ji; Debora Sujono; Erik B Sudderth", "abstract": "Black-box variational inference algorithms use stochastic sampling to analyze diverse statistical models, like those expressed in probabilistic programming languages, without model-specific derivations. While the popular score-function estimator computes unbiased gradient estimates, its variance is often unacceptably large, especially in models with discrete latent variables. We propose a stochastic natural gradient estimator that is as broadly applicable and unbiased, but improves efficiency by exploiting the curvature of the variational bound, and provably reduces variance by marginalizing discrete latent variables. 
Our marginalized stochastic natural gradients have intriguing connections to classic coordinate ascent variational inference, but allow parallel updates of variational parameters, and provide superior convergence guarantees relative to naive Monte Carlo approximations. We integrate our method with the probabilistic programming language Pyro and evaluate real-world models of documents, images, networks, and crowd-sourcing. Compared to score-function estimators, we require far fewer Monte Carlo samples and consistently converge orders of magnitude faster.", "bibtex": "@InProceedings{pmlr-v139-ji21b,\n title = \t {Marginalized Stochastic Natural Gradients for Black-Box Variational Inference},\n author = {Ji, Geng and Sujono, Debora and Sudderth, Erik B},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4870--4881},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ji21b/ji21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/ji21b.html},\n abstract = \t {Black-box variational inference algorithms use stochastic sampling to analyze diverse statistical models, like those expressed in probabilistic programming languages, without model-specific derivations. While the popular score-function estimator computes unbiased gradient estimates, its variance is often unacceptably large, especially in models with discrete latent variables. We propose a stochastic natural gradient estimator that is as broadly applicable and unbiased, but improves efficiency by exploiting the curvature of the variational bound, and provably reduces variance by marginalizing discrete latent variables. Our marginalized stochastic natural gradients have intriguing connections to classic coordinate ascent variational inference, but allow parallel updates of variational parameters, and provide superior convergence guarantees relative to naive Monte Carlo approximations. We integrate our method with the probabilistic programming language Pyro and evaluate real-world models of documents, images, networks, and crowd-sourcing. 
Compared to score-function estimators, we require far fewer Monte Carlo samples and consistently converge orders of magnitude faster.}\n}", "pdf": "http://proceedings.mlr.press/v139/ji21b/ji21b.pdf", "supp": "", "pdf_size": 6665235, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17969082576879830790&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Facebook AI + Department of Computer Science, University of California, Irvine; Department of Computer Science, University of California, Irvine; Department of Computer Science, University of California, Irvine", "aff_domain": "fb.com;uci.edu; ", "email": "fb.com;uci.edu; ", "github": "", "project": "http://probmods.github.io/ppaml2016/chapters/4-3-variational.html", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/ji21b.html", "aff_unique_index": "0+1;1;1", "aff_unique_norm": "Meta;University of California, Irvine", "aff_unique_dep": "Facebook AI;Department of Computer Science", "aff_unique_url": "https://www.facebook.com;https://www.uci.edu", "aff_unique_abbr": "Facebook AI;UCI", "aff_campus_unique_index": "1;1;1", "aff_campus_unique": ";Irvine", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "United States" }, { "title": "Markpainting: Adversarial Machine Learning meets Inpainting", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9367", "id": "9367", "proceeding": "http://proceedings.mlr.press/v139/khachaturov21a.html", "slides": "", "author_site": "David G Khachaturov, Ilia Shumailov, Yiren Zhao, Nicolas Papernot, Ross Anderson", "author": "David Khachaturov; Ilia Shumailov; Yiren Zhao; Nicolas Papernot; Ross Anderson", "abstract": "Inpainting is a learned interpolation technique that is based on generative modeling and used to populate masked or missing pieces in an image; it has wide applications in picture editing and retouching. Recently, inpainting started being used for watermark removal, raising concerns. In this paper we study how to manipulate it using our markpainting technique. First, we show how an image owner with access to an inpainting model can augment their image in such a way that any attempt to edit it using that model will add arbitrary visible information. We find that we can target multiple different models simultaneously with our technique. This can be designed to reconstitute a watermark if the editor had been trying to remove it. Second, we show that our markpainting technique is transferable to models that have different architectures or were trained on different datasets, so watermarks created using it are difficult for adversaries to remove. Markpainting is novel and can be used as a manipulation alarm that becomes visible in the event of inpainting. 
Source code is available at: https://github.com/iliaishacked/markpainting.", "bibtex": "@InProceedings{pmlr-v139-khachaturov21a,\n title = \t {Markpainting: Adversarial Machine Learning meets Inpainting},\n author = {Khachaturov, David and Shumailov, Ilia and Zhao, Yiren and Papernot, Nicolas and Anderson, Ross},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5409--5419},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/khachaturov21a/khachaturov21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/khachaturov21a.html},\n abstract = \t {Inpainting is a learned interpolation technique that is based on generative modeling and used to populate masked or missing pieces in an image; it has wide applications in picture editing and retouching. Recently, inpainting started being used for watermark removal, raising concerns. In this paper we study how to manipulate it using our markpainting technique. First, we show how an image owner with access to an inpainting model can augment their image in such a way that any attempt to edit it using that model will add arbitrary visible information. We find that we can target multiple different models simultaneously with our technique. This can be designed to reconstitute a watermark if the editor had been trying to remove it. Second, we show that our markpainting technique is transferable to models that have different architectures or were trained on different datasets, so watermarks created using it are difficult for adversaries to remove. Markpainting is novel and can be used as a manipulation alarm that becomes visible in the event of inpainting. 
Source code is available at: https://github.com/iliaishacked/markpainting.}\n}", "pdf": "http://proceedings.mlr.press/v139/khachaturov21a/khachaturov21a.pdf", "supp": "", "pdf_size": 2904663, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7879607124420125546&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Computer Laboratory, University of Cambridge; Computer Laboratory, University of Cambridge + University of Toronto and Vector Institute; Computer Laboratory, University of Cambridge; University of Toronto and Vector Institute; Computer Laboratory, University of Cambridge", "aff_domain": "cl.cam.ac.uk;cl.cam.ac.uk; ; ;cl.cam.ac.uk", "email": "cl.cam.ac.uk;cl.cam.ac.uk; ; ;cl.cam.ac.uk", "github": "https://github.com/iliaishacked/markpainting", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/khachaturov21a.html", "aff_unique_index": "0;0+1;0;1;0", "aff_unique_norm": "University of Cambridge;University of Toronto", "aff_unique_dep": "Computer Laboratory;", "aff_unique_url": "https://www.cam.ac.uk;https://www.utoronto.ca", "aff_unique_abbr": "Cambridge;U of T", "aff_campus_unique_index": "0;0+1;0;1;0", "aff_campus_unique": "Cambridge;Toronto", "aff_country_unique_index": "0;0+1;0;1;0", "aff_country_unique": "United Kingdom;Canada" }, { "title": "Massively Parallel and Asynchronous Tsetlin Machine Architecture Supporting Almost Constant-Time Scaling", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10157", "id": "10157", "proceeding": "http://proceedings.mlr.press/v139/abeyrathna21a.html", "slides": "", "author_site": "Kuruge Darshana Abeyrathna, Bimal Bhattarai, Morten Goodwin, Saeed Rahimi Gorji, Ole-Christoffer Granmo, Lei Jiao, Rupsa Saha, Rohan Kumar Yadav", "author": "Kuruge Darshana Abeyrathna; Bimal Bhattarai; Morten Goodwin; Saeed Rahimi Gorji; Ole-Christoffer Granmo; Lei Jiao; Rupsa Saha; Rohan K. Yadav", "abstract": "Using logical clauses to represent patterns, Tsetlin Machine (TM) have recently obtained competitive performance in terms of accuracy, memory footprint, energy, and learning speed on several benchmarks. Each TM clause votes for or against a particular class, with classification resolved using a majority vote. While the evaluation of clauses is fast, being based on binary operators, the voting makes it necessary to synchronize the clause evaluation, impeding parallelization. In this paper, we propose a novel scheme for desynchronizing the evaluation of clauses, eliminating the voting bottleneck. In brief, every clause runs in its own thread for massive native parallelism. For each training example, we keep track of the class votes obtained from the clauses in local voting tallies. The local voting tallies allow us to detach the processing of each clause from the rest of the clauses, supporting decentralized learning. This means that the TM most of the time will operate on outdated voting tallies. We evaluated the proposed parallelization across diverse learning tasks and it turns out that our decentralized TM learning algorithm copes well with working on outdated data, resulting in no significant loss in learning accuracy. Furthermore, we show that the approach provides up to 50 times faster learning. Finally, learning time is almost constant for reasonable clause amounts (employing from 20 to 7,000 clauses on a Tesla V100 GPU). For sufficiently large clause numbers, computation time increases approximately proportionally. 
Our parallel and asynchronous architecture thus allows processing of more massive datasets and operating with more clauses for higher accuracy.", "bibtex": "@InProceedings{pmlr-v139-abeyrathna21a,\n title = \t {Massively Parallel and Asynchronous Tsetlin Machine Architecture Supporting Almost Constant-Time Scaling},\n author = {Abeyrathna, Kuruge Darshana and Bhattarai, Bimal and Goodwin, Morten and Gorji, Saeed Rahimi and Granmo, Ole-Christoffer and Jiao, Lei and Saha, Rupsa and Yadav, Rohan K.},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10--20},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/abeyrathna21a/abeyrathna21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/abeyrathna21a.html},\n abstract = \t {Using logical clauses to represent patterns, Tsetlin Machine (TM) have recently obtained competitive performance in terms of accuracy, memory footprint, energy, and learning speed on several benchmarks. Each TM clause votes for or against a particular class, with classification resolved using a majority vote. While the evaluation of clauses is fast, being based on binary operators, the voting makes it necessary to synchronize the clause evaluation, impeding parallelization. In this paper, we propose a novel scheme for desynchronizing the evaluation of clauses, eliminating the voting bottleneck. In brief, every clause runs in its own thread for massive native parallelism. For each training example, we keep track of the class votes obtained from the clauses in local voting tallies. The local voting tallies allow us to detach the processing of each clause from the rest of the clauses, supporting decentralized learning. This means that the TM most of the time will operate on outdated voting tallies. We evaluated the proposed parallelization across diverse learning tasks and it turns out that our decentralized TM learning algorithm copes well with working on outdated data, resulting in no significant loss in learning accuracy. Furthermore, we show that the approach provides up to 50 times faster learning. Finally, learning time is almost constant for reasonable clause amounts (employing from 20 to 7,000 clauses on a Tesla V100 GPU). For sufficiently large clause numbers, computation time increases approximately proportionally. 
Our parallel and asynchronous architecture thus allows processing of more massive datasets and operating with more clauses for higher accuracy.}\n}", "pdf": "http://proceedings.mlr.press/v139/abeyrathna21a/abeyrathna21a.pdf", "supp": "", "pdf_size": 1235664, "gs_citation": 63, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14399815899714278833&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Information and Communication Technology, University of Agder, Grimstad, Norway; Department of Information and Communication Technology, University of Agder, Grimstad, Norway; Department of Information and Communication Technology, University of Agder, Grimstad, Norway; Department of Information and Communication Technology, University of Agder, Grimstad, Norway; Department of Information and Communication Technology, University of Agder, Grimstad, Norway; Department of Information and Communication Technology, University of Agder, Grimstad, Norway; Department of Information and Communication Technology, University of Agder, Grimstad, Norway; Department of Information and Communication Technology, University of Agder, Grimstad, Norway", "aff_domain": "uia.no;uia.no;uia.no;uia.no;uia.no;uia.no;uia.no;uia.no", "email": "uia.no;uia.no;uia.no;uia.no;uia.no;uia.no;uia.no;uia.no", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/abeyrathna21a.html", "aff_unique_index": "0;0;0;0;0;0;0;0", "aff_unique_norm": "University of Agder", "aff_unique_dep": "Department of Information and Communication Technology", "aff_unique_url": "https://www.uia.no", "aff_unique_abbr": "", "aff_campus_unique_index": "0;0;0;0;0;0;0;0", "aff_campus_unique": "Grimstad", "aff_country_unique_index": "0;0;0;0;0;0;0;0", "aff_country_unique": "Norway" }, { "title": "Matrix Completion with Model-free Weighting", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8707", "id": "8707", "proceeding": "http://proceedings.mlr.press/v139/wang21x.html", "slides": "/media/icml-2021/Slides/8707.pdf", "author_site": "Jiayi Wang, Ka Wai Wong, Xiaojun Mao, Kwun Chuen Gary Chan", "author": "Jiayi Wang; Raymond K. W. Wong; Xiaojun Mao; Kwun Chuen Gary Chan", "abstract": "In this paper, we propose a novel method for matrix completion under general non-uniform missing structures. By controlling an upper bound of a novel balancing error, we construct weights that can actively adjust for the non-uniformity in the empirical risk without explicitly modeling the observation probabilities, and can be computed efficiently via convex optimization. The recovered matrix based on the proposed weighted empirical risk enjoys appealing theoretical guarantees. In particular, the proposed method achieves stronger guarantee than existing work in terms of the scaling with respect to the observation probabilities, under asymptotically heterogeneous missing settings (where entry-wise observation probabilities can be of different orders). These settings can be regarded as a better theoretical model of missing patterns with highly varying probabilities. We also provide a new minimax lower bound under a class of heterogeneous settings. Numerical experiments are also provided to demonstrate the effectiveness of the proposed method.", "bibtex": "@InProceedings{pmlr-v139-wang21x,\n title = \t {Matrix Completion with Model-free Weighting},\n author = {Wang, Jiayi and Wong, Raymond K. W. 
and Mao, Xiaojun and Chan, Kwun Chuen Gary},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10927--10936},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21x/wang21x.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21x.html},\n abstract = \t {In this paper, we propose a novel method for matrix completion under general non-uniform missing structures. By controlling an upper bound of a novel balancing error, we construct weights that can actively adjust for the non-uniformity in the empirical risk without explicitly modeling the observation probabilities, and can be computed efficiently via convex optimization. The recovered matrix based on the proposed weighted empirical risk enjoys appealing theoretical guarantees. In particular, the proposed method achieves stronger guarantee than existing work in terms of the scaling with respect to the observation probabilities, under asymptotically heterogeneous missing settings (where entry-wise observation probabilities can be of different orders). These settings can be regarded as a better theoretical model of missing patterns with highly varying probabilities. We also provide a new minimax lower bound under a class of heterogeneous settings. Numerical experiments are also provided to demonstrate the effectiveness of the proposed method.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21x/wang21x.pdf", "supp": "", "pdf_size": 340465, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13477751570992585302&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Statistics, Texas A&M University, College Station, TX 77843, USA; Department of Statistics, Texas A&M University, College Station, TX 77843, USA; School of Data Science, Fudan University, Shanghai, 200433, China; Department of Biostatistics, University of Washington, Seattle, WA 98195, USA", "aff_domain": "tamu.edu;tamu.edu;fudan.edu.cn;u.washington.edu", "email": "tamu.edu;tamu.edu;fudan.edu.cn;u.washington.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/wang21x.html", "aff_unique_index": "0;0;1;2", "aff_unique_norm": "Texas A&M University;Fudan University;University of Washington", "aff_unique_dep": "Department of Statistics;School of Data Science;Department of Biostatistics", "aff_unique_url": "https://www.tamu.edu;https://www.fudan.edu.cn;https://www.washington.edu", "aff_unique_abbr": "TAMU;Fudan;UW", "aff_campus_unique_index": "0;0;1;2", "aff_campus_unique": "College Station;Shanghai;Seattle", "aff_country_unique_index": "0;0;1;0", "aff_country_unique": "United States;China" }, { "title": "Matrix Sketching for Secure Collaborative Machine Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10417", "id": "10417", "proceeding": "http://proceedings.mlr.press/v139/zhang21v.html", "slides": "", "author_site": "Mengjiao Zhang, Shusen Wang", "author": "Mengjiao Zhang; Shusen Wang", "abstract": "Collaborative learning allows participants to jointly train a model without data sharing. To update the model parameters, the central server broadcasts model parameters to the clients, and the clients send updating directions such as gradients to the server. 
While data do not leave a client device, the communicated gradients and parameters will leak a client\u2019s privacy. Attacks that infer clients\u2019 privacy from gradients and parameters have been developed by prior work. Simple defenses such as dropout and differential privacy either fail to defend the attacks or seriously hurt test accuracy. We propose a practical defense which we call Double-Blind Collaborative Learning (DBCL). The high-level idea is to apply random matrix sketching to the parameters (aka weights) and re-generate random sketching after each iteration. DBCL prevents clients from conducting gradient-based privacy inferences which are the most effective attacks. DBCL works because from the attacker\u2019s perspective, sketching is effectively random noise that outweighs the signal. Notably, DBCL does not much increase computation and communication costs and does not hurt test accuracy at all.", "bibtex": "@InProceedings{pmlr-v139-zhang21v,\n title = \t {Matrix Sketching for Secure Collaborative Machine Learning},\n author = {Zhang, Mengjiao and Wang, Shusen},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12589--12599},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21v/zhang21v.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21v.html},\n abstract = \t {Collaborative learning allows participants to jointly train a model without data sharing. To update the model parameters, the central server broadcasts model parameters to the clients, and the clients send updating directions such as gradients to the server. While data do not leave a client device, the communicated gradients and parameters will leak a client\u2019s privacy. Attacks that infer clients\u2019 privacy from gradients and parameters have been developed by prior work. Simple defenses such as dropout and differential privacy either fail to defend the attacks or seriously hurt test accuracy. We propose a practical defense which we call Double-Blind Collaborative Learning (DBCL). The high-level idea is to apply random matrix sketching to the parameters (aka weights) and re-generate random sketching after each iteration. DBCL prevents clients from conducting gradient-based privacy inferences which are the most effective attacks. DBCL works because from the attacker\u2019s perspective, sketching is effectively random noise that outweighs the signal. 
Notably, DBCL does not much increase computation and communication costs and does not hurt test accuracy at all.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21v/zhang21v.pdf", "supp": "", "pdf_size": 1059763, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3052703848171285313&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, Stevens Institute of Technology, Hoboken, NJ 07030; Department of Computer Science, Stevens Institute of Technology, Hoboken, NJ 07030", "aff_domain": "stevens.edu;stevens.edu", "email": "stevens.edu;stevens.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/zhang21v.html", "aff_unique_index": "0;0", "aff_unique_norm": "Stevens Institute of Technology", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.stevens.edu", "aff_unique_abbr": "SIT", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Hoboken", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Maximum Mean Discrepancy Test is Aware of Adversarial Attacks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10683", "id": "10683", "proceeding": "http://proceedings.mlr.press/v139/gao21b.html", "slides": "", "author_site": "Ruize Gao, Feng Liu, Jingfeng Zhang, Bo Han, Tongliang Liu, Gang Niu, Masashi Sugiyama", "author": "Ruize Gao; Feng Liu; Jingfeng Zhang; Bo Han; Tongliang Liu; Gang Niu; Masashi Sugiyama", "abstract": "The maximum mean discrepancy (MMD) test could in principle detect any distributional discrepancy between two datasets. However, it has been shown that the MMD test is unaware of adversarial attacks\u2013the MMD test failed to detect the discrepancy between natural data and adversarial data. Given this phenomenon, we raise a question: are natural and adversarial data really from different distributions? The answer is affirmative\u2013the previous use of the MMD test on the purpose missed three key factors, and accordingly, we propose three components. Firstly, the Gaussian kernel has limited representation power, and we replace it with an effective deep kernel. Secondly, the test power of the MMD test was neglected, and we maximize it following asymptotic statistics. Finally, adversarial data may be non-independent, and we overcome this issue with the help of wild bootstrap. By taking care of the three factors, we verify that the MMD test is aware of adversarial attacks, which lights up a novel road for adversarial data detection based on two-sample tests.", "bibtex": "@InProceedings{pmlr-v139-gao21b,\n title = \t {Maximum Mean Discrepancy Test is Aware of Adversarial Attacks},\n author = {Gao, Ruize and Liu, Feng and Zhang, Jingfeng and Han, Bo and Liu, Tongliang and Niu, Gang and Sugiyama, Masashi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3564--3575},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gao21b/gao21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/gao21b.html},\n abstract = \t {The maximum mean discrepancy (MMD) test could in principle detect any distributional discrepancy between two datasets. 
However, it has been shown that the MMD test is unaware of adversarial attacks\u2013the MMD test failed to detect the discrepancy between natural data and adversarial data. Given this phenomenon, we raise a question: are natural and adversarial data really from different distributions? The answer is affirmative\u2013the previous use of the MMD test on the purpose missed three key factors, and accordingly, we propose three components. Firstly, the Gaussian kernel has limited representation power, and we replace it with an effective deep kernel. Secondly, the test power of the MMD test was neglected, and we maximize it following asymptotic statistics. Finally, adversarial data may be non-independent, and we overcome this issue with the help of wild bootstrap. By taking care of the three factors, we verify that the MMD test is aware of adversarial attacks, which lights up a novel road for adversarial data detection based on two-sample tests.}\n}", "pdf": "http://proceedings.mlr.press/v139/gao21b/gao21b.pdf", "supp": "", "pdf_size": 3820137, "gs_citation": 77, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5133700864957699812&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": ";;;;;;", "aff_domain": ";;;;;;", "email": ";;;;;;", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/gao21b.html" }, { "title": "Measuring Robustness in Deep Learning Based Compressive Sensing", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9951", "id": "9951", "proceeding": "http://proceedings.mlr.press/v139/darestani21a.html", "slides": "", "author_site": "Mohammad Zalbagi Darestani, Akshay Chaudhari, Reinhard Heckel", "author": "Mohammad Zalbagi Darestani; Akshay S Chaudhari; Reinhard Heckel", "abstract": "Deep neural networks give state-of-the-art accuracy for reconstructing images from few and noisy measurements, a problem arising for example in accelerated magnetic resonance imaging (MRI). However, recent works have raised concerns that deep-learning-based image reconstruction methods are sensitive to perturbations and are less robust than traditional methods: Neural networks (i) may be sensitive to small, yet adversarially-selected perturbations, (ii) may perform poorly under distribution shifts, and (iii) may fail to recover small but important features in an image. In order to understand the sensitivity to such perturbations, in this work, we measure the robustness of different approaches for image reconstruction including trained and un-trained neural networks as well as traditional sparsity-based methods. We find, contrary to prior works, that both trained and un-trained methods are vulnerable to adversarial perturbations. Moreover, both trained and un-trained methods tuned for a particular dataset suffer very similarly from distribution shifts. Finally, we demonstrate that an image reconstruction method that achieves higher reconstruction quality, also performs better in terms of accurately recovering fine details. 
Our results indicate that the state-of-the-art deep-learning-based image reconstruction methods provide improved performance over traditional methods without compromising robustness.", "bibtex": "@InProceedings{pmlr-v139-darestani21a,\n title = \t {Measuring Robustness in Deep Learning Based Compressive Sensing},\n author = {Darestani, Mohammad Zalbagi and Chaudhari, Akshay S and Heckel, Reinhard},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2433--2444},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/darestani21a/darestani21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/darestani21a.html},\n abstract = \t {Deep neural networks give state-of-the-art accuracy for reconstructing images from few and noisy measurements, a problem arising for example in accelerated magnetic resonance imaging (MRI). However, recent works have raised concerns that deep-learning-based image reconstruction methods are sensitive to perturbations and are less robust than traditional methods: Neural networks (i) may be sensitive to small, yet adversarially-selected perturbations, (ii) may perform poorly under distribution shifts, and (iii) may fail to recover small but important features in an image. In order to understand the sensitivity to such perturbations, in this work, we measure the robustness of different approaches for image reconstruction including trained and un-trained neural networks as well as traditional sparsity-based methods. We find, contrary to prior works, that both trained and un-trained methods are vulnerable to adversarial perturbations. Moreover, both trained and un-trained methods tuned for a particular dataset suffer very similarly from distribution shifts. Finally, we demonstrate that an image reconstruction method that achieves higher reconstruction quality, also performs better in terms of accurately recovering fine details. 
Our results indicate that the state-of-the-art deep-learning-based image reconstruction methods provide improved performance over traditional methods without compromising robustness.}\n}", "pdf": "http://proceedings.mlr.press/v139/darestani21a/darestani21a.pdf", "supp": "", "pdf_size": 773221, "gs_citation": 111, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15924992003782305417&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Electrical and Computer Engineering, Rice University; Department of Radiology and Department of Biomedical Data Science, Stanford University; Department of Electrical and Computer Engineering, Technical University of Munich", "aff_domain": "rice.edu;stanford.edu;rice.edu", "email": "rice.edu;stanford.edu;rice.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/darestani21a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Rice University;Stanford University;Technical University of Munich", "aff_unique_dep": "Department of Electrical and Computer Engineering;Department of Radiology;Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.rice.edu;https://www.stanford.edu;https://www.tum.de", "aff_unique_abbr": "Rice;Stanford;TUM", "aff_campus_unique_index": "1", "aff_campus_unique": ";Stanford", "aff_country_unique_index": "0;0;1", "aff_country_unique": "United States;Germany" }, { "title": "Mediated Uncoupled Learning: Learning Functions without Direct Input-output Correspondences", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9509", "id": "9509", "proceeding": "http://proceedings.mlr.press/v139/yamane21a.html", "slides": "/media/icml-2021/Slides/9509_SrVKb7O.pdf", "author_site": "Ikko Yamane, Junya Honda, Florian YGER, Masashi Sugiyama", "author": "Ikko Yamane; Junya Honda; Florian Yger; Masashi Sugiyama", "abstract": "Ordinary supervised learning is useful when we have paired training data of input $X$ and output $Y$. However, such paired data can be difficult to collect in practice. In this paper, we consider the task of predicting $Y$ from $X$ when we have no paired data of them, but we have two separate, independent datasets of $X$ and $Y$ each observed with some mediating variable $U$, that is, we have two datasets $S_X = \\{(X_i, U_i)\\}$ and $S_Y = \\{(U\u2019_j, Y\u2019_j)\\}$. A naive approach is to predict $U$ from $X$ using $S_X$ and then $Y$ from $U$ using $S_Y$, but we show that this is not statistically consistent. Moreover, predicting $U$ can be more difficult than predicting $Y$ in practice, e.g., when $U$ has higher dimensionality. To circumvent the difficulty, we propose a new method that avoids predicting $U$ but directly learns $Y = f(X)$ by training $f(X)$ with $S_{X}$ to predict $h(U)$ which is trained with $S_{Y}$ to approximate $Y$. 
We prove statistical consistency and error bounds of our method and experimentally confirm its practical usefulness.", "bibtex": "@InProceedings{pmlr-v139-yamane21a,\n title = \t {Mediated Uncoupled Learning: Learning Functions without Direct Input-output Correspondences},\n author = {Yamane, Ikko and Honda, Junya and Yger, Florian and Sugiyama, Masashi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11637--11647},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yamane21a/yamane21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yamane21a.html},\n abstract = \t {Ordinary supervised learning is useful when we have paired training data of input $X$ and output $Y$. However, such paired data can be difficult to collect in practice. In this paper, we consider the task of predicting $Y$ from $X$ when we have no paired data of them, but we have two separate, independent datasets of $X$ and $Y$ each observed with some mediating variable $U$, that is, we have two datasets $S_X = \\{(X_i, U_i)\\}$ and $S_Y = \\{(U\u2019_j, Y\u2019_j)\\}$. A naive approach is to predict $U$ from $X$ using $S_X$ and then $Y$ from $U$ using $S_Y$, but we show that this is not statistically consistent. Moreover, predicting $U$ can be more difficult than predicting $Y$ in practice, e.g., when $U$ has higher dimensionality. To circumvent the difficulty, we propose a new method that avoids predicting $U$ but directly learns $Y = f(X)$ by training $f(X)$ with $S_{X}$ to predict $h(U)$ which is trained with $S_{Y}$ to approximate $Y$. 
We prove statistical consistency and error bounds of our method and experimentally confirm its practical usefulness.}\n}", "pdf": "http://proceedings.mlr.press/v139/yamane21a/yamane21a.pdf", "supp": "", "pdf_size": 5761506, "gs_citation": 1, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17617652020684598053&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "LAMSADE, CNRS, Universit\u00e9 Paris-Dauphine, PSL Research University, 75016 PARIS, FRANCE+RIKEN AIP, Tokyo, Japan; Kyoto University, Kyoto, Japan+RIKEN AIP, Tokyo, Japan; LAMSADE, CNRS, Universit\u00e9 Paris-Dauphine, PSL Research University, 75016 PARIS, FRANCE+RIKEN AIP, Tokyo, Japan; RIKEN AIP, Tokyo, Japan+The University of Tokyo, Tokyo, Japan", "aff_domain": "dauphine.psl.eu; ; ; ", "email": "dauphine.psl.eu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/yamane21a.html", "aff_unique_index": "0+1;2+1;0+1;1+3", "aff_unique_norm": "Universit\u00e9 Paris-Dauphine;RIKEN AIP;Kyoto University;University of Tokyo", "aff_unique_dep": "LAMSADE;;;", "aff_unique_url": "https://www.univ-paris-dauphine.fr;https://aip.Riken.jp;https://www.kyoto-u.ac.jp;https://www.u-tokyo.ac.jp", "aff_unique_abbr": "UPD;RIKEN AIP;Kyoto U;UTokyo", "aff_campus_unique_index": "0+1;2+1;0+1;1+1", "aff_campus_unique": "Paris;Tokyo;Kyoto", "aff_country_unique_index": "0+1;1+1;0+1;1+1", "aff_country_unique": "France;Japan" }, { "title": "Megaverse: Simulating Embodied Agents at One Million Experiences per Second", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10247", "id": "10247", "proceeding": "http://proceedings.mlr.press/v139/petrenko21a.html", "slides": "/media/icml-2021/Slides/10247.pdf", "author_site": "Aleksei Petrenko, Erik Wijmans, Brennan Shacklett, Vladlen Koltun", "author": "Aleksei Petrenko; Erik Wijmans; Brennan Shacklett; Vladlen Koltun", "abstract": "We present Megaverse, a new 3D simulation platform for reinforcement learning and embodied AI research. The efficient design of our engine enables physics-based simulation with high-dimensional egocentric observations at more than 1,000,000 actions per second on a single 8-GPU node. Megaverse is up to 70x faster than DeepMind Lab in fully-shaded 3D scenes with interactive objects. We achieve this high simulation performance by leveraging batched simulation, thereby taking full advantage of the massive parallelism of modern GPUs. We use Megaverse to build a new benchmark that consists of several single-agent and multi-agent tasks covering a variety of cognitive challenges. We evaluate model-free RL on this benchmark to provide baselines and facilitate future research.", "bibtex": "@InProceedings{pmlr-v139-petrenko21a,\n title = \t {Megaverse: Simulating Embodied Agents at One Million Experiences per Second},\n author = {Petrenko, Aleksei and Wijmans, Erik and Shacklett, Brennan and Koltun, Vladlen},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8556--8566},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/petrenko21a/petrenko21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/petrenko21a.html},\n abstract = \t {We present Megaverse, a new 3D simulation platform for reinforcement learning and embodied AI research. 
The efficient design of our engine enables physics-based simulation with high-dimensional egocentric observations at more than 1,000,000 actions per second on a single 8-GPU node. Megaverse is up to 70x faster than DeepMind Lab in fully-shaded 3D scenes with interactive objects. We achieve this high simulation performance by leveraging batched simulation, thereby taking full advantage of the massive parallelism of modern GPUs. We use Megaverse to build a new benchmark that consists of several single-agent and multi-agent tasks covering a variety of cognitive challenges. We evaluate model-free RL on this benchmark to provide baselines and facilitate future research.}\n}", "pdf": "http://proceedings.mlr.press/v139/petrenko21a/petrenko21a.pdf", "supp": "", "pdf_size": 4370541, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3066110392358323524&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Intel Labs + University of Southern California; Intel Labs; Georgia Institute of Technology; Intel Labs + Stanford University", "aff_domain": "usc.edu; ; ; ", "email": "usc.edu; ; ; ", "github": "", "project": "www.megaverse.info", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/petrenko21a.html", "aff_unique_index": "0+1;0;2;0+3", "aff_unique_norm": "Intel;University of Southern California;Georgia Institute of Technology;Stanford University", "aff_unique_dep": "Intel Labs;;;", "aff_unique_url": "https://www.intel.com;https://www.usc.edu;https://www.gatech.edu;https://www.stanford.edu", "aff_unique_abbr": "Intel;USC;Georgia Tech;Stanford", "aff_campus_unique_index": "1;2", "aff_campus_unique": ";Los Angeles;Stanford", "aff_country_unique_index": "0+0;0;0;0+0", "aff_country_unique": "United States" }, { "title": "Memory Efficient Online Meta Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9787", "id": "9787", "proceeding": "http://proceedings.mlr.press/v139/acar21b.html", "slides": "", "author_site": "Durmus Alp Emre Acar, Ruizhao Zhu, Venkatesh Saligrama", "author": "Durmus Alp Emre Acar; Ruizhao Zhu; Venkatesh Saligrama", "abstract": "We propose a novel algorithm for online meta learning where task instances are sequentially revealed with limited supervision and a learner is expected to meta learn them in each round, so as to allow the learner to customize a task-specific model rapidly with little task-level supervision. A fundamental concern arising in online meta-learning is the scalability of memory as more tasks are viewed over time. Heretofore, prior works have allowed for perfect recall leading to linear increase in memory with time. Different from prior works, in our method, prior task instances are allowed to be deleted. We propose to leverage prior task instances by means of a fixed-size state-vector, which is updated sequentially. Our theoretical analysis demonstrates that our proposed memory efficient online learning (MOML) method suffers sub-linear regret with convex loss functions and sub-linear local regret for nonconvex losses. 
On benchmark datasets we show that our method can outperform prior works even though they allow for perfect recall.", "bibtex": "@InProceedings{pmlr-v139-acar21b,\n title = \t {Memory Efficient Online Meta Learning},\n author = {Acar, Durmus Alp Emre and Zhu, Ruizhao and Saligrama, Venkatesh},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {32--42},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/acar21b/acar21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/acar21b.html},\n abstract = \t {We propose a novel algorithm for online meta learning where task instances are sequentially revealed with limited supervision and a learner is expected to meta learn them in each round, so as to allow the learner to customize a task-specific model rapidly with little task-level supervision. A fundamental concern arising in online meta-learning is the scalability of memory as more tasks are viewed over time. Heretofore, prior works have allowed for perfect recall leading to linear increase in memory with time. Different from prior works, in our method, prior task instances are allowed to be deleted. We propose to leverage prior task instances by means of a fixed-size state-vector, which is updated sequentially. Our theoretical analysis demonstrates that our proposed memory efficient online learning (MOML) method suffers sub-linear regret with convex loss functions and sub-linear local regret for nonconvex losses. On benchmark datasets we show that our method can outperform prior works even though they allow for perfect recall.}\n}", "pdf": "http://proceedings.mlr.press/v139/acar21b/acar21b.pdf", "supp": "", "pdf_size": 627265, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3854023236205369963&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "Boston University; Boston University; Boston University", "aff_domain": "bu.edu;bu.edu;bu.edu", "email": "bu.edu;bu.edu;bu.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/acar21b.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Boston University", "aff_unique_dep": "", "aff_unique_url": "https://www.bu.edu", "aff_unique_abbr": "BU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Memory-Efficient Pipeline-Parallel DNN Training", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10457", "id": "10457", "proceeding": "http://proceedings.mlr.press/v139/narayanan21a.html", "slides": "/media/icml-2021/Slides/10457.pdf", "author_site": "Deepak Narayanan, Amar Phanishayee, Kaiyu Shi, Xie Chen, Matei Zaharia", "author": "Deepak Narayanan; Amar Phanishayee; Kaiyu Shi; Xie Chen; Matei Zaharia", "abstract": "Many state-of-the-art ML results have been obtained by scaling up the number of parameters in existing models. However, parameters and activations for such large models often do not fit in the memory of a single accelerator device; this means that it is necessary to distribute training of large models over multiple accelerators. In this work, we propose PipeDream-2BW, a system that supports memory-efficient pipeline parallelism. 
PipeDream-2BW uses a novel pipelining and weight gradient coalescing strategy, combined with the double buffering of weights, to ensure high throughput, low memory footprint, and weight update semantics similar to data parallelism. In addition, PipeDream-2BW automatically partitions the model over the available hardware resources, while respecting hardware constraints such as memory capacities of accelerators and interconnect topologies. PipeDream-2BW can accelerate the training of large GPT and BERT language models by up to 20x with similar final model accuracy.", "bibtex": "@InProceedings{pmlr-v139-narayanan21a,\n title = \t {Memory-Efficient Pipeline-Parallel DNN Training},\n author = {Narayanan, Deepak and Phanishayee, Amar and Shi, Kaiyu and Chen, Xie and Zaharia, Matei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7937--7947},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/narayanan21a/narayanan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/narayanan21a.html},\n abstract = \t {Many state-of-the-art ML results have been obtained by scaling up the number of parameters in existing models. However, parameters and activations for such large models often do not fit in the memory of a single accelerator device; this means that it is necessary to distribute training of large models over multiple accelerators. In this work, we propose PipeDream-2BW, a system that supports memory-efficient pipeline parallelism. PipeDream-2BW uses a novel pipelining and weight gradient coalescing strategy, combined with the double buffering of weights, to ensure high throughput, low memory footprint, and weight update semantics similar to data parallelism. In addition, PipeDream-2BW automatically partitions the model over the available hardware resources, while respecting hardware constraints such as memory capacities of accelerators and interconnect topologies. 
PipeDream-2BW can accelerate the training of large GPT and BERT language models by up to 20x with similar final model accuracy.}\n}", "pdf": "http://proceedings.mlr.press/v139/narayanan21a/narayanan21a.pdf", "supp": "", "pdf_size": 1551407, "gs_citation": 274, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1218765086676148615&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Stanford University; Microsoft Research; Microsoft + Stanford University; Microsoft; Stanford University", "aff_domain": "cs.stanford.edu; ; ; ; ", "email": "cs.stanford.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/narayanan21a.html", "aff_unique_index": "0;1;1+0;1;0", "aff_unique_norm": "Stanford University;Microsoft", "aff_unique_dep": ";Microsoft Research", "aff_unique_url": "https://www.stanford.edu;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "Stanford;MSR", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Stanford;", "aff_country_unique_index": "0;0;0+0;0;0", "aff_country_unique": "United States" }, { "title": "Message Passing Adaptive Resonance Theory for Online Active Semi-supervised Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9089", "id": "9089", "proceeding": "http://proceedings.mlr.press/v139/kim21e.html", "slides": "/media/icml-2021/Slides/9089.pdf", "author_site": "Taehyeong Kim, Injune Hwang, Hyundo Lee, Hyunseo Kim, Won-Seok Choi, Joseph Lim, Byoung-Tak Zhang", "author": "Taehyeong Kim; Injune Hwang; Hyundo Lee; Hyunseo Kim; Won-Seok Choi; Joseph J Lim; Byoung-Tak Zhang", "abstract": "Active learning is widely used to reduce labeling effort and training time by repeatedly querying only the most beneficial samples from unlabeled data. In real-world problems where data cannot be stored indefinitely due to limited storage or privacy issues, the query selection and the model update should be performed as soon as a new data sample is observed. Various online active learning methods have been studied to deal with these challenges; however, there are difficulties in selecting representative query samples and updating the model efficiently without forgetting. In this study, we propose Message Passing Adaptive Resonance Theory (MPART) that learns the distribution and topology of input data online. Through message passing on the topological graph, MPART actively queries informative and representative samples, and continuously improves the classification performance using both labeled and unlabeled data. 
We evaluate our model in stream-based selective sampling scenarios with comparable query selection strategies, showing that MPART significantly outperforms competitive models.", "bibtex": "@InProceedings{pmlr-v139-kim21e,\n title = \t {Message Passing Adaptive Resonance Theory for Online Active Semi-supervised Learning},\n author = {Kim, Taehyeong and Hwang, Injune and Lee, Hyundo and Kim, Hyunseo and Choi, Won-Seok and Lim, Joseph J and Zhang, Byoung-Tak},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5519--5529},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kim21e/kim21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/kim21e.html},\n abstract = \t {Active learning is widely used to reduce labeling effort and training time by repeatedly querying only the most beneficial samples from unlabeled data. In real-world problems where data cannot be stored indefinitely due to limited storage or privacy issues, the query selection and the model update should be performed as soon as a new data sample is observed. Various online active learning methods have been studied to deal with these challenges; however, there are difficulties in selecting representative query samples and updating the model efficiently without forgetting. In this study, we propose Message Passing Adaptive Resonance Theory (MPART) that learns the distribution and topology of input data online. Through message passing on the topological graph, MPART actively queries informative and representative samples, and continuously improves the classification performance using both labeled and unlabeled data. 
We evaluate our model in stream-based selective sampling scenarios with comparable query selection strategies, showing that MPART significantly outperforms competitive models.}\n}", "pdf": "http://proceedings.mlr.press/v139/kim21e/kim21e.pdf", "supp": "", "pdf_size": 2135262, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7425344514166188748&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "AI Lab, CTO Division, LG Electronics, Seoul, Republic of Korea+Seoul National University, Seoul, Republic of Korea; AI Lab, CTO Division, LG Electronics, Seoul, Republic of Korea; Seoul National University, Seoul, Republic of Korea; Seoul National University, Seoul, Republic of Korea; Seoul National University, Seoul, Republic of Korea; University of Southern California, California, USA+Seoul National University, Seoul, Republic of Korea; Seoul National University, Seoul, Republic of Korea", "aff_domain": "lge.com;lge.com;snu.ac.kr;snu.ac.kr;snu.ac.kr;usc.edu;bi.snu.ac.kr", "email": "lge.com;lge.com;snu.ac.kr;snu.ac.kr;snu.ac.kr;usc.edu;bi.snu.ac.kr", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/kim21e.html", "aff_unique_index": "0+1;0;1;1;1;2+1;1", "aff_unique_norm": "LG;Seoul National University;University of Southern California", "aff_unique_dep": "AI Lab, CTO Division;;", "aff_unique_url": "https://www.lg.com;https://www.snu.ac.kr;https://www.usc.edu", "aff_unique_abbr": "LG;SNU;USC", "aff_campus_unique_index": "0+0;0;0;0;0;1+0;0", "aff_campus_unique": "Seoul;Los Angeles", "aff_country_unique_index": "0+0;0;0;0;0;1+0;0", "aff_country_unique": "South Korea;United States" }, { "title": "Meta Learning for Support Recovery in High-dimensional Precision Matrix Estimation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9285", "id": "9285", "proceeding": "http://proceedings.mlr.press/v139/zhang21aa.html", "slides": "/media/icml-2021/Slides/9285.pdf", "author_site": "Qian Zhang, Yilin Zheng, Jean Honorio", "author": "Qian Zhang; Yilin Zheng; Jean Honorio", "abstract": "In this paper, we study meta learning for support (i.e., the set of non-zero entries) recovery in high-dimensional precision matrix estimation where we reduce the sufficient sample complexity in a novel task with the information learned from other auxiliary tasks. In our setup, each task has a different random true precision matrix, each with a possibly different support. We assume that the union of the supports of all the true precision matrices (i.e., the true support union) is small in size. We propose to pool all the samples from different tasks, and \\emph{improperly} estimate a single precision matrix by minimizing the $\\ell_1$-regularized log-determinant Bregman divergence. We show that with high probability, the support of the \\emph{improperly} estimated single precision matrix is equal to the true support union, provided a sufficient number of samples per task $n \\in O((\\log N)/K)$, for $N$-dimensional vectors and $K$ tasks. That is, one requires less samples per task when more tasks are available. We prove a matching information-theoretic lower bound for the necessary number of samples, which is $n \\in \\Omega((\\log N)/K)$, and thus, our algorithm is minimax optimal. 
Then for the novel task, we prove that the minimization of the $\\ell_1$-regularized log-determinant Bregman divergence with the additional constraint that the support is a subset of the estimated support union could reduce the sufficient sample complexity of successful support recovery to $O(\\log(|S_{\\text{off}}|))$ where $|S_{\\text{off}}|$ is the number of off-diagonal elements in the support union and is much less than $N$ for sparse matrices. We also prove a matching information-theoretic lower bound of $\\Omega(\\log(|S_{\\text{off}}|))$ for the necessary number of samples.", "bibtex": "@InProceedings{pmlr-v139-zhang21aa,\n title = \t {Meta Learning for Support Recovery in High-dimensional Precision Matrix Estimation},\n author = {Zhang, Qian and Zheng, Yilin and Honorio, Jean},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12642--12652},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21aa/zhang21aa.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21aa.html},\n abstract = \t {In this paper, we study meta learning for support (i.e., the set of non-zero entries) recovery in high-dimensional precision matrix estimation where we reduce the sufficient sample complexity in a novel task with the information learned from other auxiliary tasks. In our setup, each task has a different random true precision matrix, each with a possibly different support. We assume that the union of the supports of all the true precision matrices (i.e., the true support union) is small in size. We propose to pool all the samples from different tasks, and \\emph{improperly} estimate a single precision matrix by minimizing the $\\ell_1$-regularized log-determinant Bregman divergence. We show that with high probability, the support of the \\emph{improperly} estimated single precision matrix is equal to the true support union, provided a sufficient number of samples per task $n \\in O((\\log N)/K)$, for $N$-dimensional vectors and $K$ tasks. That is, one requires less samples per task when more tasks are available. We prove a matching information-theoretic lower bound for the necessary number of samples, which is $n \\in \\Omega((\\log N)/K)$, and thus, our algorithm is minimax optimal. Then for the novel task, we prove that the minimization of the $\\ell_1$-regularized log-determinant Bregman divergence with the additional constraint that the support is a subset of the estimated support union could reduce the sufficient sample complexity of successful support recovery to $O(\\log(|S_{\\text{off}}|))$ where $|S_{\\text{off}}|$ is the number of off-diagonal elements in the support union and is much less than $N$ for sparse matrices. 
We also prove a matching information-theoretic lower bound of $\\Omega(\\log(|S_{\\text{off}}|))$ for the necessary number of samples.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21aa/zhang21aa.pdf", "supp": "", "pdf_size": 507166, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16462595587142456223&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Statistics, Purdue University, West Lafayette, USA; Department of Computer Science, Purdue University, West Lafayette, USA; Department of Computer Science, Purdue University, West Lafayette, USA", "aff_domain": "purdue.edu; ; ", "email": "purdue.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zhang21aa.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Purdue University", "aff_unique_dep": "Department of Statistics", "aff_unique_url": "https://www.purdue.edu", "aff_unique_abbr": "Purdue", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "West Lafayette", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Meta-Cal: Well-controlled Post-hoc Calibration by Ranking", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9773", "id": "9773", "proceeding": "http://proceedings.mlr.press/v139/ma21a.html", "slides": "/media/icml-2021/Slides/9773.pdf", "author_site": "Xingchen Ma, Matthew B Blaschko", "author": "Xingchen Ma; Matthew B. Blaschko", "abstract": "In many applications, it is desirable that a classifier not only makes accurate predictions, but also outputs calibrated posterior probabilities. However, many existing classifiers, especially deep neural network classifiers, tend to be uncalibrated. Post-hoc calibration is a technique to recalibrate a model by learning a calibration map. Existing approaches mostly focus on constructing calibration maps with low calibration errors, however, this quality is inadequate for a calibrator being useful. In this paper, we introduce two constraints that are worth consideration in designing a calibration map for post-hoc calibration. Then we present Meta-Cal, which is built from a base calibrator and a ranking model. Under some mild assumptions, two high-probability bounds are given with respect to these constraints. Empirical results on CIFAR-10, CIFAR-100 and ImageNet and a range of popular network architectures show our proposed method significantly outperforms the current state of the art for post-hoc multi-class classification calibration.", "bibtex": "@InProceedings{pmlr-v139-ma21a,\n title = \t {Meta-Cal: Well-controlled Post-hoc Calibration by Ranking},\n author = {Ma, Xingchen and Blaschko, Matthew B.},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7235--7245},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ma21a/ma21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ma21a.html},\n abstract = \t {In many applications, it is desirable that a classifier not only makes accurate predictions, but also outputs calibrated posterior probabilities. However, many existing classifiers, especially deep neural network classifiers, tend to be uncalibrated. Post-hoc calibration is a technique to recalibrate a model by learning a calibration map. 
Existing approaches mostly focus on constructing calibration maps with low calibration errors, however, this quality is inadequate for a calibrator being useful. In this paper, we introduce two constraints that are worth consideration in designing a calibration map for post-hoc calibration. Then we present Meta-Cal, which is built from a base calibrator and a ranking model. Under some mild assumptions, two high-probability bounds are given with respect to these constraints. Empirical results on CIFAR-10, CIFAR-100 and ImageNet and a range of popular network architectures show our proposed method significantly outperforms the current state of the art for post-hoc multi-class classification calibration.}\n}", "pdf": "http://proceedings.mlr.press/v139/ma21a/ma21a.pdf", "supp": "", "pdf_size": 625208, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4779443102063826651&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "ESAT-PSI, KU Leuven, Belgium; ESAT-PSI, KU Leuven, Belgium", "aff_domain": "esat.kuleuven.be; ", "email": "esat.kuleuven.be; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/ma21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "KU Leuven", "aff_unique_dep": "ESAT-PSI", "aff_unique_url": "https://www.kuleuven.be", "aff_unique_abbr": "KU Leuven", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Belgium" }, { "title": "Meta-Learning Bidirectional Update Rules", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8529", "id": "8529", "proceeding": "http://proceedings.mlr.press/v139/sandler21a.html", "slides": "/media/icml-2021/Slides/8529_ARaD6Nu.pdf", "author_site": "Mark Sandler, Max Vladymyrov, Andrey Zhmoginov, Nolan Miller, Tom Madams, Andrew Jackson, Blaise Ag\u00fcera y Arcas", "author": "Mark Sandler; Max Vladymyrov; Andrey Zhmoginov; Nolan Miller; Tom Madams; Andrew Jackson; Blaise Ag\u00fcera Y Arcas", "abstract": "In this paper, we introduce a new type of generalized neural network where neurons and synapses maintain multiple states. We show that classical gradient-based backpropagation in neural networks can be seen as a special case of a two-state network where one state is used for activations and another for gradients, with update rules derived from the chain rule. In our generalized framework, networks have neither explicit notion of nor ever receive gradients. The synapses and neurons are updated using a bidirectional Hebb-style update rule parameterized by a shared low-dimensional \"genome\". We show that such genomes can be meta-learned from scratch, using either conventional optimization techniques, or evolutionary strategies, such as CMA-ES. 
Resulting update rules generalize to unseen tasks and train faster than gradient descent based optimizers for several standard computer vision and synthetic tasks.", "bibtex": "@InProceedings{pmlr-v139-sandler21a,\n title = \t {Meta-Learning Bidirectional Update Rules},\n author = {Sandler, Mark and Vladymyrov, Max and Zhmoginov, Andrey and Miller, Nolan and Madams, Tom and Jackson, Andrew and Arcas, Blaise Ag{\\\"u}era Y},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9288--9300},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sandler21a/sandler21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/sandler21a.html},\n abstract = \t {In this paper, we introduce a new type of generalized neural network where neurons and synapses maintain multiple states. We show that classical gradient-based backpropagation in neural networks can be seen as a special case of a two-state network where one state is used for activations and another for gradients, with update rules derived from the chain rule. In our generalized framework, networks have neither explicit notion of nor ever receive gradients. The synapses and neurons are updated using a bidirectional Hebb-style update rule parameterized by a shared low-dimensional \"genome\". We show that such genomes can be meta-learned from scratch, using either conventional optimization techniques, or evolutionary strategies, such as CMA-ES. Resulting update rules generalize to unseen tasks and train faster than gradient descent based optimizers for several standard computer vision and synthetic tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/sandler21a/sandler21a.pdf", "supp": "", "pdf_size": 6919557, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9524592536445734267&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Google Research; Google Research; Google Research; Google Research; Google Research; Google Research; Google Research", "aff_domain": "google.com; ; ; ; ; ; ", "email": "google.com; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/sandler21a.html", "aff_unique_index": "0;0;0;0;0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google Research", "aff_campus_unique_index": "0;0;0;0;0;0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Meta-StyleSpeech : Multi-Speaker Adaptive Text-to-Speech Generation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9659", "id": "9659", "proceeding": "http://proceedings.mlr.press/v139/min21b.html", "slides": "", "author_site": "Dongchan Min, Dong Bok Lee, Eunho Yang, Sung Ju Hwang", "author": "Dongchan Min; Dong Bok Lee; Eunho Yang; Sung Ju Hwang", "abstract": "With rapid progress in neural text-to-speech (TTS) models, personalized speech generation is now in high demand for many applications. For practical applicability, a TTS model should generate high-quality speech with only a few audio samples from the given speaker, that are also short in length. 
However, existing methods either require to fine-tune the model or achieve low adaptation quality without fine-tuning. In this work, we propose StyleSpeech, a new TTS model which not only synthesizes high-quality speech but also effectively adapts to new speakers. Specifically, we propose Style-Adaptive Layer Normalization (SALN) which aligns gain and bias of the text input according to the style extracted from a reference speech audio. With SALN, our model effectively synthesizes speech in the style of the target speaker even from a single speech audio. Furthermore, to enhance StyleSpeech\u2019s adaptation to speech from new speakers, we extend it to Meta-StyleSpeech by introducing two discriminators trained with style prototypes, and performing episodic training. The experimental results show that our models generate high-quality speech which accurately follows the speaker\u2019s voice with single short-duration (1-3 sec) speech audio, significantly outperforming baselines.", "bibtex": "@InProceedings{pmlr-v139-min21b,\n title = \t {Meta-StyleSpeech : Multi-Speaker Adaptive Text-to-Speech Generation},\n author = {Min, Dongchan and Lee, Dong Bok and Yang, Eunho and Hwang, Sung Ju},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7748--7759},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/min21b/min21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/min21b.html},\n abstract = \t {With rapid progress in neural text-to-speech (TTS) models, personalized speech generation is now in high demand for many applications. For practical applicability, a TTS model should generate high-quality speech with only a few audio samples from the given speaker, that are also short in length. However, existing methods either require to fine-tune the model or achieve low adaptation quality without fine-tuning. In this work, we propose StyleSpeech, a new TTS model which not only synthesizes high-quality speech but also effectively adapts to new speakers. Specifically, we propose Style-Adaptive Layer Normalization (SALN) which aligns gain and bias of the text input according to the style extracted from a reference speech audio. With SALN, our model effectively synthesizes speech in the style of the target speaker even from a single speech audio. Furthermore, to enhance StyleSpeech\u2019s adaptation to speech from new speakers, we extend it to Meta-StyleSpeech by introducing two discriminators trained with style prototypes, and performing episodic training. 
The experimental results show that our models generate high-quality speech which accurately follows the speaker\u2019s voice with single short-duration (1-3 sec) speech audio, significantly outperforming baselines.}\n}", "pdf": "http://proceedings.mlr.press/v139/min21b/min21b.pdf", "supp": "", "pdf_size": 528648, "gs_citation": 202, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9200152829644981336&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Graduate School of AI, Korea Advanced Institute of Science and Technology (KAIST), Seoul, South Korea+AITRICS, Seoul, South Korea; Graduate School of AI, Korea Advanced Institute of Science and Technology (KAIST), Seoul, South Korea+AITRICS, Seoul, South Korea; Graduate School of AI, Korea Advanced Institute of Science and Technology (KAIST), Seoul, South Korea+AITRICS, Seoul, South Korea; Graduate School of AI, Korea Advanced Institute of Science and Technology (KAIST), Seoul, South Korea+AITRICS, Seoul, South Korea", "aff_domain": "kaist.ac.kr; ; ;kaist.ac.kr", "email": "kaist.ac.kr; ; ;kaist.ac.kr", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/min21b.html", "aff_unique_index": "0+1;0+1;0+1;0+1", "aff_unique_norm": "Korea Advanced Institute of Science and Technology;AITRICS", "aff_unique_dep": "Graduate School of AI;", "aff_unique_url": "https://www.kaist.ac.kr;", "aff_unique_abbr": "KAIST;", "aff_campus_unique_index": "0+0;0+0;0+0;0+0", "aff_campus_unique": "Seoul", "aff_country_unique_index": "0+0;0+0;0+0;0+0", "aff_country_unique": "South Korea" }, { "title": "Meta-Thompson Sampling", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9729", "id": "9729", "proceeding": "http://proceedings.mlr.press/v139/kveton21a.html", "slides": "", "author_site": "Branislav Kveton, Mikhail Konobeev, Manzil Zaheer, Chih-wei Hsu, Martin Mladenov, Craig Boutilier, Csaba Szepesvari", "author": "Branislav Kveton; Mikhail Konobeev; Manzil Zaheer; Chih-Wei Hsu; Martin Mladenov; Craig Boutilier; Csaba Szepesvari", "abstract": "Efficient exploration in bandits is a fundamental online learning problem. We propose a variant of Thompson sampling that learns to explore better as it interacts with bandit instances drawn from an unknown prior. The algorithm meta-learns the prior and thus we call it MetaTS. We propose several efficient implementations of MetaTS and analyze it in Gaussian bandits. Our analysis shows the benefit of meta-learning and is of a broader interest, because we derive a novel prior-dependent Bayes regret bound for Thompson sampling. Our theory is complemented by empirical evaluation, which shows that MetaTS quickly adapts to the unknown prior.", "bibtex": "@InProceedings{pmlr-v139-kveton21a,\n title = \t {Meta-Thompson Sampling},\n author = {Kveton, Branislav and Konobeev, Mikhail and Zaheer, Manzil and Hsu, Chih-Wei and Mladenov, Martin and Boutilier, Craig and Szepesvari, Csaba},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5884--5893},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kveton21a/kveton21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kveton21a.html},\n abstract = \t {Efficient exploration in bandits is a fundamental online learning problem. 
We propose a variant of Thompson sampling that learns to explore better as it interacts with bandit instances drawn from an unknown prior. The algorithm meta-learns the prior and thus we call it MetaTS. We propose several efficient implementations of MetaTS and analyze it in Gaussian bandits. Our analysis shows the benefit of meta-learning and is of a broader interest, because we derive a novel prior-dependent Bayes regret bound for Thompson sampling. Our theory is complemented by empirical evaluation, which shows that MetaTS quickly adapts to the unknown prior.}\n}", "pdf": "http://proceedings.mlr.press/v139/kveton21a/kveton21a.pdf", "supp": "", "pdf_size": 995369, "gs_citation": 84, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12001319711188088040&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Google Research; University of Alberta; Google Research; Google Research; Google Research; Google Research; DeepMind + University of Alberta", "aff_domain": "google.com; ; ; ; ; ; ", "email": "google.com; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/kveton21a.html", "aff_unique_index": "0;1;0;0;0;0;2+1", "aff_unique_norm": "Google;University of Alberta;DeepMind", "aff_unique_dep": "Google Research;;", "aff_unique_url": "https://research.google;https://www.ualberta.ca;https://deepmind.com", "aff_unique_abbr": "Google Research;UAlberta;DeepMind", "aff_campus_unique_index": "0;0;0;0;0;", "aff_campus_unique": "Mountain View;", "aff_country_unique_index": "0;1;0;0;0;0;2+1", "aff_country_unique": "United States;Canada;United Kingdom" }, { "title": "Meta-learning Hyperparameter Performance Prediction with Neural Processes", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9265", "id": "9265", "proceeding": "http://proceedings.mlr.press/v139/wei21c.html", "slides": "", "author_site": "Ying WEI, Peilin Zhao, Junzhou Huang", "author": "Ying Wei; Peilin Zhao; Junzhou Huang", "abstract": "The surrogate that predicts the performance of hyperparameters has been a key component for sequential model-based hyperparameter optimization. In practical applications, a trial of a hyper-parameter configuration may be so costly that a surrogate is expected to return an optimal configuration with as few trials as possible. Observing that human experts draw on their expertise in a machine learning model by trying configurations that once performed well on other datasets, we are inspired to build a trial-efficient surrogate by transferring the meta-knowledge learned from historical trials on other datasets. We propose an end-to-end surrogate named as Transfer NeuralProcesses (TNP) that learns a comprehensive set of meta-knowledge, including the parameters of historical surrogates, historical trials, and initial configurations for other datasets. 
Experiments on extensive OpenML datasets and three computer vision datasets demonstrate that the proposed algorithm achieves state-of-the-art performance in at least one order of magnitude less trials.", "bibtex": "@InProceedings{pmlr-v139-wei21c,\n title = \t {Meta-learning Hyperparameter Performance Prediction with Neural Processes},\n author = {Wei, Ying and Zhao, Peilin and Huang, Junzhou},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11058--11067},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wei21c/wei21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/wei21c.html},\n abstract = \t {The surrogate that predicts the performance of hyperparameters has been a key component for sequential model-based hyperparameter optimization. In practical applications, a trial of a hyper-parameter configuration may be so costly that a surrogate is expected to return an optimal configuration with as few trials as possible. Observing that human experts draw on their expertise in a machine learning model by trying configurations that once performed well on other datasets, we are inspired to build a trial-efficient surrogate by transferring the meta-knowledge learned from historical trials on other datasets. We propose an end-to-end surrogate named as Transfer NeuralProcesses (TNP) that learns a comprehensive set of meta-knowledge, including the parameters of historical surrogates, historical trials, and initial configurations for other datasets. Experiments on extensive OpenML datasets and three computer vision datasets demonstrate that the proposed algorithm achieves state-of-the-art performance in at least one order of magnitude less trials.}\n}", "pdf": "http://proceedings.mlr.press/v139/wei21c/wei21c.pdf", "supp": "", "pdf_size": 1556268, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3599136257271489950&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science, City University of Hong Kong, Hong Kong; Tencent AI Lab, Shenzhen, China; Tencent AI Lab, Shenzhen, China", "aff_domain": "cityu.edu.hk; ; ", "email": "cityu.edu.hk; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/wei21c.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "City University of Hong Kong;Tencent", "aff_unique_dep": "Department of Computer Science;AI Lab", "aff_unique_url": "https://www.cityu.edu.hk;https://ai.tencent.com", "aff_unique_abbr": "CityU;Tencent AI Lab", "aff_campus_unique_index": "0;1;1", "aff_campus_unique": "Hong Kong SAR;Shenzhen", "aff_country_unique_index": "0;0;0", "aff_country_unique": "China" }, { "title": "MetaCURE: Meta Reinforcement Learning with Empowerment-Driven Exploration", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9825", "id": "9825", "proceeding": "http://proceedings.mlr.press/v139/zhang21w.html", "slides": "/media/icml-2021/Slides/9825.pdf", "author_site": "Jin Zhang, Jianhao Wang, Hao Hu, Tong Chen, Yingfeng Chen, Changjie Fan, Chongjie Zhang", "author": "Jin Zhang; Jianhao Wang; Hao Hu; Tong Chen; Yingfeng Chen; Changjie Fan; Chongjie Zhang", "abstract": "Meta reinforcement learning (meta-RL) extracts knowledge from previous tasks and achieves fast adaptation to new tasks. 
Despite recent progress, efficient exploration in meta-RL remains a key challenge in sparse-reward tasks, as it requires quickly finding informative task-relevant experiences in both meta-training and adaptation. To address this challenge, we explicitly model an exploration policy learning problem for meta-RL, which is separated from exploitation policy learning, and introduce a novel empowerment-driven exploration objective, which aims to maximize information gain for task identification. We derive a corresponding intrinsic reward and develop a new off-policy meta-RL framework, which efficiently learns separate context-aware exploration and exploitation policies by sharing the knowledge of task inference. Experimental evaluation shows that our meta-RL method significantly outperforms state-of-the-art baselines on various sparse-reward MuJoCo locomotion tasks and more complex sparse-reward Meta-World tasks.", "bibtex": "@InProceedings{pmlr-v139-zhang21w,\n title = \t {MetaCURE: Meta Reinforcement Learning with Empowerment-Driven Exploration},\n author = {Zhang, Jin and Wang, Jianhao and Hu, Hao and Chen, Tong and Chen, Yingfeng and Fan, Changjie and Zhang, Chongjie},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12600--12610},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21w/zhang21w.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21w.html},\n abstract = \t {Meta reinforcement learning (meta-RL) extracts knowledge from previous tasks and achieves fast adaptation to new tasks. Despite recent progress, efficient exploration in meta-RL remains a key challenge in sparse-reward tasks, as it requires quickly finding informative task-relevant experiences in both meta-training and adaptation. To address this challenge, we explicitly model an exploration policy learning problem for meta-RL, which is separated from exploitation policy learning, and introduce a novel empowerment-driven exploration objective, which aims to maximize information gain for task identification. We derive a corresponding intrinsic reward and develop a new off-policy meta-RL framework, which efficiently learns separate context-aware exploration and exploitation policies by sharing the knowledge of task inference. 
Experimental evaluation shows that our meta-RL method significantly outperforms state-of-the-art baselines on various sparse-reward MuJoCo locomotion tasks and more complex sparse-reward Meta-World tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21w/zhang21w.pdf", "supp": "", "pdf_size": 2299755, "gs_citation": 41, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8017350448991384435&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Institute for Interdisciplinary Information Sciences, Tsinghua University, China; Institute for Interdisciplinary Information Sciences, Tsinghua University, China; Institute for Interdisciplinary Information Sciences, Tsinghua University, China; Institute for Interdisciplinary Information Sciences, Tsinghua University, China; Fuxi AI Lab, NetEase, China; Fuxi AI Lab, NetEase, China; Institute for Interdisciplinary Information Sciences, Tsinghua University, China", "aff_domain": "mails.tsinghua.edu.cn; ; ; ; ; ; ", "email": "mails.tsinghua.edu.cn; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/zhang21w.html", "aff_unique_index": "0;0;0;0;1;1;0", "aff_unique_norm": "Tsinghua University;Netease", "aff_unique_dep": "Institute for Interdisciplinary Information Sciences;Fuxi AI Lab", "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.163.com", "aff_unique_abbr": "Tsinghua;NetEase", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "China" }, { "title": "Mind the Box: $l_1$-APGD for Sparse Adversarial Attacks on Image Classifiers", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8787", "id": "8787", "proceeding": "http://proceedings.mlr.press/v139/croce21a.html", "slides": "/media/icml-2021/Slides/8787_Qu2X7ul.pdf", "author_site": "Francesco Croce, Matthias Hein", "author": "Francesco Croce; Matthias Hein", "abstract": "We show that when taking into account also the image domain $[0,1]^d$, established $l_1$-projected gradient descent (PGD) attacks are suboptimal as they do not consider that the effective threat model is the intersection of the $l_1$-ball and $[0,1]^d$. We study the expected sparsity of the steepest descent step for this effective threat model and show that the exact projection onto this set is computationally feasible and yields better performance. Moreover, we propose an adaptive form of PGD which is highly effective even with a small budget of iterations. Our resulting $l_1$-APGD is a strong white-box attack showing that prior works overestimated their $l_1$-robustness. Using $l_1$-APGD for adversarial training we get a robust classifier with SOTA $l_1$-robustness. 
Finally, we combine $l_1$-APGD and an adaptation of the Square Attack to $l_1$ into $l_1$-AutoAttack, an ensemble of attacks which reliably assesses adversarial robustness for the threat model of $l_1$-ball intersected with $[0,1]^d$.", "bibtex": "@InProceedings{pmlr-v139-croce21a,\n title = \t {Mind the Box: $l_1$-APGD for Sparse Adversarial Attacks on Image Classifiers},\n author = {Croce, Francesco and Hein, Matthias},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2201--2211},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/croce21a/croce21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/croce21a.html},\n abstract = \t {We show that when taking into account also the image domain $[0,1]^d$, established $l_1$-projected gradient descent (PGD) attacks are suboptimal as they do not consider that the effective threat model is the intersection of the $l_1$-ball and $[0,1]^d$. We study the expected sparsity of the steepest descent step for this effective threat model and show that the exact projection onto this set is computationally feasible and yields better performance. Moreover, we propose an adaptive form of PGD which is highly effective even with a small budget of iterations. Our resulting $l_1$-APGD is a strong white-box attack showing that prior works overestimated their $l_1$-robustness. Using $l_1$-APGD for adversarial training we get a robust classifier with SOTA $l_1$-robustness. Finally, we combine $l_1$-APGD and an adaptation of the Square Attack to $l_1$ into $l_1$-AutoAttack, an ensemble of attacks which reliably assesses adversarial robustness for the threat model of $l_1$-ball intersected with $[0,1]^d$.}\n}", "pdf": "http://proceedings.mlr.press/v139/croce21a/croce21a.pdf", "supp": "", "pdf_size": 3868024, "gs_citation": 77, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15814902844388830149&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "University of T\u00fcbingen; University of T\u00fcbingen", "aff_domain": "uni-tuebingen.de; ", "email": "uni-tuebingen.de; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/croce21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of T\u00fcbingen", "aff_unique_dep": "", "aff_unique_url": "https://www.uni-tuebingen.de/", "aff_unique_abbr": "Uni T\u00fcbingen", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "title": "Mixed Cross Entropy Loss for Neural Machine Translation", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9227", "id": "9227", "proceeding": "http://proceedings.mlr.press/v139/li21n.html", "slides": "/media/icml-2021/Slides/9227.pdf", "author_site": "Haoran Li, Wei Lu", "author": "Haoran Li; Wei Lu", "abstract": "In neural machine translation, Cross Entropy loss (CE) is the standard loss function in two training methods of auto-regressive models, i.e., teacher forcing and scheduled sampling. In this paper, we propose mixed Cross Entropy loss (mixed CE) as a substitute for CE in both training approaches. In teacher forcing, the model trained with CE regards the translation problem as a one-to-one mapping process, while in mixed CE this process can be relaxed to one-to-many. 
In scheduled sampling, we show that mixed CE has the potential to encourage the training and testing behaviours to be similar to each other, more effectively mitigating the exposure bias problem. We demonstrate the superiority of mixed CE over CE on several machine translation datasets, WMT\u201916 Ro-En, WMT\u201916 Ru-En, and WMT\u201914 En-De in both teacher forcing and scheduled sampling setups. Furthermore, in WMT\u201914 En-De, we also find mixed CE consistently outperforms CE on a multi-reference set as well as a challenging paraphrased reference set. We also found the model trained with mixed CE is able to provide a better probability distribution defined over the translation output space. Our code is available at https://github.com/haorannlp/mix.", "bibtex": "@InProceedings{pmlr-v139-li21n,\n title = \t {Mixed Cross Entropy Loss for Neural Machine Translation},\n author = {Li, Haoran and Lu, Wei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6425--6436},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21n/li21n.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21n.html},\n abstract = \t {In neural machine translation, Cross Entropy loss (CE) is the standard loss function in two training methods of auto-regressive models, i.e., teacher forcing and scheduled sampling. In this paper, we propose mixed Cross Entropy loss (mixed CE) as a substitute for CE in both training approaches. In teacher forcing, the model trained with CE regards the translation problem as a one-to-one mapping process, while in mixed CE this process can be relaxed to one-to-many. In scheduled sampling, we show that mixed CE has the potential to encourage the training and testing behaviours to be similar to each other, more effectively mitigating the exposure bias problem. We demonstrate the superiority of mixed CE over CE on several machine translation datasets, WMT\u201916 Ro-En, WMT\u201916 Ru-En, and WMT\u201914 En-De in both teacher forcing and scheduled sampling setups. Furthermore, in WMT\u201914 En-De, we also find mixed CE consistently outperforms CE on a multi-reference set as well as a challenging paraphrased reference set. We also found the model trained with mixed CE is able to provide a better probability distribution defined over the translation output space. 
Our code is available at https://github.com/haorannlp/mix.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21n/li21n.pdf", "supp": "", "pdf_size": 1605470, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16791533551271975512&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "StatNLP Research Group, Singapore University of Technology and Design, Singapore; StatNLP Research Group, Singapore University of Technology and Design, Singapore", "aff_domain": "sutd.edu.sg;sutd.edu.sg", "email": "sutd.edu.sg;sutd.edu.sg", "github": "https://github.com/haorannlp/mix", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/li21n.html", "aff_unique_index": "0;0", "aff_unique_norm": "Singapore University of Technology and Design", "aff_unique_dep": "StatNLP Research Group", "aff_unique_url": "https://www.sutd.edu.sg", "aff_unique_abbr": "SUTD", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Singapore" }, { "title": "Mixed Nash Equilibria in the Adversarial Examples Game", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10029", "id": "10029", "proceeding": "http://proceedings.mlr.press/v139/meunier21a.html", "slides": "", "author_site": "Laurent Meunier, Meyer Scetbon, Rafael Pinot, Jamal Atif, Yann Chevaleyre", "author": "Laurent Meunier; Meyer Scetbon; Rafael B Pinot; Jamal Atif; Yann Chevaleyre", "abstract": "This paper tackles the problem of adversarial examples from a game theoretic point of view. We study the open question of the existence of mixed Nash equilibria in the zero-sum game formed by the attacker and the classifier. While previous works usually allow only one player to use randomized strategies, we show the necessity of considering randomization for both the classifier and the attacker. We demonstrate that this game has no duality gap, meaning that it always admits approximate Nash equilibria. We also provide the first optimization algorithms to learn a mixture of classifiers that approximately realizes the value of this game, \\emph{i.e.} procedures to build an optimally robust randomized classifier.", "bibtex": "@InProceedings{pmlr-v139-meunier21a,\n title = \t {Mixed Nash Equilibria in the Adversarial Examples Game},\n author = {Meunier, Laurent and Scetbon, Meyer and Pinot, Rafael B and Atif, Jamal and Chevaleyre, Yann},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7677--7687},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/meunier21a/meunier21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/meunier21a.html},\n abstract = \t {This paper tackles the problem of adversarial examples from a game theoretic point of view. We study the open question of the existence of mixed Nash equilibria in the zero-sum game formed by the attacker and the classifier. While previous works usually allow only one player to use randomized strategies, we show the necessity of considering randomization for both the classifier and the attacker. We demonstrate that this game has no duality gap, meaning that it always admits approximate Nash equilibria. 
We also provide the first optimization algorithms to learn a mixture of classifiers that approximately realizes the value of this game, \\emph{i.e.} procedures to build an optimally robust randomized classifier.}\n}", "pdf": "http://proceedings.mlr.press/v139/meunier21a/meunier21a.pdf", "supp": "", "pdf_size": 1267291, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16626344452324777101&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Miles Team, LAMSADE, Universit\u00e9 Paris-Dauphine, Paris, France+Facebook AI Research, Paris, France; CREST, ENSAE, Paris, France; Ecole Polytechnique F\u00e9d\u00e9rale de Lausanne (EPFL), Switzerland; Miles Team, LAMSADE, Universit\u00e9 Paris-Dauphine, Paris, France; Miles Team, LAMSADE, Universit\u00e9 Paris-Dauphine, Paris, France", "aff_domain": "fb.com;ensae.fr;epfl.ch;dauphine.fr;dauphine.fr", "email": "fb.com;ensae.fr;epfl.ch;dauphine.fr;dauphine.fr", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/meunier21a.html", "aff_unique_index": "0+1;2;3;0;0", "aff_unique_norm": "Universit\u00e9 Paris-Dauphine;Meta;CREST;EPFL", "aff_unique_dep": "LAMSADE;Facebook AI Research;;", "aff_unique_url": "https://www.univ-paris-dauphine.fr;https://research.facebook.com;;https://www.epfl.ch", "aff_unique_abbr": ";FAIR;;EPFL", "aff_campus_unique_index": "0+0;0;0", "aff_campus_unique": "Paris;", "aff_country_unique_index": "0+0;0;1;0;0", "aff_country_unique": "France;Switzerland" }, { "title": "Model Distillation for Revenue Optimization: Interpretable Personalized Pricing", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8575", "id": "8575", "proceeding": "http://proceedings.mlr.press/v139/biggs21a.html", "slides": "", "author_site": "Max Biggs, Wei Sun, Markus Ettl", "author": "Max Biggs; Wei Sun; Markus Ettl", "abstract": "Data-driven pricing strategies are becoming increasingly common, where customers are offered a personalized price based on features that are predictive of their valuation of a product. It is desirable for this pricing policy to be simple and interpretable, so it can be verified, checked for fairness, and easily implemented. However, efforts to incorporate machine learning into a pricing framework often lead to complex pricing policies that are not interpretable, resulting in slow adoption in practice. We present a novel, customized, prescriptive tree-based algorithm that distills knowledge from a complex black-box machine learning algorithm, segments customers with similar valuations and prescribes prices in such a way that maximizes revenue while maintaining interpretability. 
We quantify the regret of a resulting policy and demonstrate its efficacy in applications with both synthetic and real-world datasets.", "bibtex": "@InProceedings{pmlr-v139-biggs21a,\n title = \t {Model Distillation for Revenue Optimization: Interpretable Personalized Pricing},\n author = {Biggs, Max and Sun, Wei and Ettl, Markus},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {946--956},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/biggs21a/biggs21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/biggs21a.html},\n abstract = \t {Data-driven pricing strategies are becoming increasingly common, where customers are offered a personalized price based on features that are predictive of their valuation of a product. It is desirable for this pricing policy to be simple and interpretable, so it can be verified, checked for fairness, and easily implemented. However, efforts to incorporate machine learning into a pricing framework often lead to complex pricing policies that are not interpretable, resulting in slow adoption in practice. We present a novel, customized, prescriptive tree-based algorithm that distills knowledge from a complex black-box machine learning algorithm, segments customers with similar valuations and prescribes prices in such a way that maximizes revenue while maintaining interpretability. We quantify the regret of a resulting policy and demonstrate its efficacy in applications with both synthetic and real-world datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/biggs21a/biggs21a.pdf", "supp": "", "pdf_size": 516766, "gs_citation": 53, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13706653035198169905&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Darden School of Business, University of Virginia, Virginia, USA; IBM Research, Yorktown Heights, New York, USA; IBM Research, Yorktown Heights, New York, USA", "aff_domain": "darden.virginia.edu; ; ", "email": "darden.virginia.edu; ; ", "github": "", "project": "https://arxiv.org/abs/2007.01903", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/biggs21a.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "University of Virginia;IBM", "aff_unique_dep": "Darden School of Business;IBM Research", "aff_unique_url": "https://www.darden.virginia.edu;https://www.ibm.com/research", "aff_unique_abbr": "UVA;IBM", "aff_campus_unique_index": "0;1;1", "aff_campus_unique": "Virginia;Yorktown Heights", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Model Fusion for Personalized Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9063", "id": "9063", "proceeding": "http://proceedings.mlr.press/v139/lam21a.html", "slides": "", "author_site": "Thanh Lam, Nghia Hoang, Bryan Kian Hsiang Low, Patrick Jaillet", "author": "Thanh Chi Lam; Nghia Hoang; Bryan Kian Hsiang Low; Patrick Jaillet", "abstract": "Production systems operating on a growing domain of analytic services often require generating warm-start solution models for emerging tasks with limited data. One potential approach to address this warm-start challenge is to adopt meta learning to generate a base model that can be adapted to solve unseen tasks with minimal fine-tuning. 
This however requires the training processes of previous solution models of existing tasks to be synchronized. This is not possible if these models were pre-trained separately on private data owned by different entities and cannot be synchronously re-trained. To accommodate for such scenarios, we develop a new personalized learning framework that synthesizes customized models for unseen tasks via fusion of independently pre-trained models of related tasks. We establish performance guarantee for the proposed framework and demonstrate its effectiveness on both synthetic and real datasets.", "bibtex": "@InProceedings{pmlr-v139-lam21a,\n title = \t {Model Fusion for Personalized Learning},\n author = {Lam, Thanh Chi and Hoang, Nghia and Low, Bryan Kian Hsiang and Jaillet, Patrick},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5948--5958},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lam21a/lam21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lam21a.html},\n abstract = \t {Production systems operating on a growing domain of analytic services often require generating warm-start solution models for emerging tasks with limited data. One potential approach to address this warm-start challenge is to adopt meta learning to generate a base model that can be adapted to solve unseen tasks with minimal fine-tuning. This however requires the training processes of previous solution models of existing tasks to be synchronized. This is not possible if these models were pre-trained separately on private data owned by different entities and cannot be synchronously re-trained. To accommodate for such scenarios, we develop a new personalized learning framework that synthesizes customized models for unseen tasks via fusion of independently pre-trained models of related tasks. 
We establish performance guarantee for the proposed framework and demonstrate its effectiveness on both synthetic and real datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/lam21a/lam21a.pdf", "supp": "", "pdf_size": 935261, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9711661317472090802&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "National University of Singapore; AWS AI Labs, Amazon; National University of Singapore; Massachusetts Institute of Technology", "aff_domain": "nus.edu.sg;amazon.com;nus.edu.sg;mit.edu", "email": "nus.edu.sg;amazon.com;nus.edu.sg;mit.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/lam21a.html", "aff_unique_index": "0;1;0;2", "aff_unique_norm": "National University of Singapore;Amazon;Massachusetts Institute of Technology", "aff_unique_dep": ";AWS AI Labs;", "aff_unique_url": "https://www.nus.edu.sg;https://aws.amazon.com;https://web.mit.edu", "aff_unique_abbr": "NUS;Amazon;MIT", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0;1", "aff_country_unique": "Singapore;United States" }, { "title": "Model Performance Scaling with Multiple Data Sources", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9409", "id": "9409", "proceeding": "http://proceedings.mlr.press/v139/hashimoto21a.html", "slides": "", "author": "Tatsunori Hashimoto", "abstract": "Real-world machine learning systems are often trained using a mix of data sources with varying cost and quality. Understanding how the size and composition of a training dataset affect model performance is critical for advancing our understanding of generalization, as well as designing more effective data collection policies. We show that there is a simple scaling law that predicts the loss incurred by a model even under varying dataset composition. Our work expands recent observations of scaling laws for log-linear generalization error in the i.i.d setting and uses this to cast model performance prediction as a learning problem. Using the theory of optimal experimental design, we derive a simple rational function approximation to generalization error that can be fitted using a few model training runs. Our approach can achieve highly accurate ($r^2\\approx .9$) predictions of model performance under substantial extrapolation in two different standard supervised learning tasks and is accurate ($r^2 \\approx .83$) on more challenging machine translation and question answering tasks where many baselines achieve worse-than-random performance.", "bibtex": "@InProceedings{pmlr-v139-hashimoto21a,\n title = \t {Model Performance Scaling with Multiple Data Sources},\n author = {Hashimoto, Tatsunori},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4107--4116},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hashimoto21a/hashimoto21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hashimoto21a.html},\n abstract = \t {Real-world machine learning systems are often trained using a mix of data sources with varying cost and quality. 
Understanding how the size and composition of a training dataset affect model performance is critical for advancing our understanding of generalization, as well as designing more effective data collection policies. We show that there is a simple scaling law that predicts the loss incurred by a model even under varying dataset composition. Our work expands recent observations of scaling laws for log-linear generalization error in the i.i.d setting and uses this to cast model performance prediction as a learning problem. Using the theory of optimal experimental design, we derive a simple rational function approximation to generalization error that can be fitted using a few model training runs. Our approach can achieve highly accurate ($r^2\\approx .9$) predictions of model performance under substantial extrapolation in two different standard supervised learning tasks and is accurate ($r^2 \\approx .83$) on more challenging machine translation and question answering tasks where many baselines achieve worse-than-random performance.}\n}", "pdf": "http://proceedings.mlr.press/v139/hashimoto21a/hashimoto21a.pdf", "supp": "", "pdf_size": 702765, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2505693185623451639&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "Microsoft Semantic Machines", "aff_domain": "microsoft.com", "email": "microsoft.com", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v139/hashimoto21a.html", "aff_unique_index": "0", "aff_unique_norm": "Microsoft", "aff_unique_dep": "Semantic Machines", "aff_unique_url": "https://www.microsoft.com", "aff_unique_abbr": "Microsoft", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "title": "Model-Based Reinforcement Learning via Latent-Space Collocation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10165", "id": "10165", "proceeding": "http://proceedings.mlr.press/v139/rybkin21b.html", "slides": "", "author_site": "Oleh Rybkin, Chuning Zhu, Anusha Nagabandi, Kostas Daniilidis, Igor Mordatch, Sergey Levine", "author": "Oleh Rybkin; Chuning Zhu; Anusha Nagabandi; Kostas Daniilidis; Igor Mordatch; Sergey Levine", "abstract": "The ability to plan into the future while utilizing only raw high-dimensional observations, such as images, can provide autonomous agents with broad and general capabilities. However, realistic tasks require performing temporally extended reasoning, and cannot be solved with only myopic, short-sighted planning. Recent work in model-based reinforcement learning (RL) has shown impressive results on tasks that require only short-horizon reasoning. In this work, we study how the long-horizon planning abilities can be improved with an algorithm that optimizes over sequences of states, rather than actions, which allows better credit assignment. To achieve this, we draw on the idea of collocation and adapt it to the image-based setting by leveraging probabilistic latent variable models, resulting in an algorithm that optimizes trajectories over latent variables. Our latent collocation method (LatCo) provides a general and effective visual planning approach, and significantly outperforms prior model-based approaches on challenging visual control tasks with sparse rewards and long-term goals. 
See the videos on the supplementary website \\url{https://sites.google.com/view/latco-mbrl/.}", "bibtex": "@InProceedings{pmlr-v139-rybkin21b,\n title = \t {Model-Based Reinforcement Learning via Latent-Space Collocation},\n author = {Rybkin, Oleh and Zhu, Chuning and Nagabandi, Anusha and Daniilidis, Kostas and Mordatch, Igor and Levine, Sergey},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9190--9201},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/rybkin21b/rybkin21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/rybkin21b.html},\n abstract = \t {The ability to plan into the future while utilizing only raw high-dimensional observations, such as images, can provide autonomous agents with broad and general capabilities. However, realistic tasks require performing temporally extended reasoning, and cannot be solved with only myopic, short-sighted planning. Recent work in model-based reinforcement learning (RL) has shown impressive results on tasks that require only short-horizon reasoning. In this work, we study how the long-horizon planning abilities can be improved with an algorithm that optimizes over sequences of states, rather than actions, which allows better credit assignment. To achieve this, we draw on the idea of collocation and adapt it to the image-based setting by leveraging probabilistic latent variable models, resulting in an algorithm that optimizes trajectories over latent variables. Our latent collocation method (LatCo) provides a general and effective visual planning approach, and significantly outperforms prior model-based approaches on challenging visual control tasks with sparse rewards and long-term goals. 
See the videos on the supplementary website \\url{https://sites.google.com/view/latco-mbrl/.}}\n}", "pdf": "http://proceedings.mlr.press/v139/rybkin21b/rybkin21b.pdf", "supp": "", "pdf_size": 1014310, "gs_citation": 56, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2726935776109554696&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "University of Pennsylvania; University of Pennsylvania; Covariant; University of Pennsylvania; Google AI; UC Berkeley", "aff_domain": "seas.upenn.edu; ; ; ; ; ", "email": "seas.upenn.edu; ; ; ; ; ", "github": "https://github.com/orybkin/latco", "project": "https://orybkin.github.io/latco/", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/rybkin21b.html", "aff_unique_index": "0;0;1;0;2;3", "aff_unique_norm": "University of Pennsylvania;Covariant;Google;University of California, Berkeley", "aff_unique_dep": ";;Google AI;", "aff_unique_url": "https://www.upenn.edu;;https://ai.google;https://www.berkeley.edu", "aff_unique_abbr": "UPenn;;Google AI;UC Berkeley", "aff_campus_unique_index": "1;2", "aff_campus_unique": ";Mountain View;Berkeley", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States;" }, { "title": "Model-Free Reinforcement Learning: from Clipped Pseudo-Regret to Sample Complexity", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8593", "id": "8593", "proceeding": "http://proceedings.mlr.press/v139/zhang21ab.html", "slides": "/media/icml-2021/Slides/8593.pdf", "author_site": "Zhang Zihan, Yuan Zhou, Xiangyang Ji", "author": "Zihan Zhang; Yuan Zhou; Xiangyang Ji", "abstract": "In this paper we consider the problem of learning an $\\epsilon$-optimal policy for a discounted Markov Decision Process (MDP). Given an MDP with $S$ states, $A$ actions, the discount factor $\\gamma \\in (0,1)$, and an approximation threshold $\\epsilon > 0$, we provide a model-free algorithm to learn an $\\epsilon$-optimal policy with sample complexity $\\tilde{O}(\\frac{SA\\ln(1/p)}{\\epsilon^2(1-\\gamma)^{5.5}})$ \\footnote{In this work, the notation $\\tilde{O}(\\cdot)$ hides poly-logarithmic factors of $S,A,1/(1-\\gamma)$, and $1/\\epsilon$.} and success probability $(1-p)$. For small enough $\\epsilon$, we show an improved algorithm with sample complexity $\\tilde{O}(\\frac{SA\\ln(1/p)}{\\epsilon^2(1-\\gamma)^{3}})$. While the first bound improves upon all known model-free algorithms and model-based ones with tight dependence on $S$, our second algorithm beats all known sample complexity bounds and matches the information theoretic lower bound up to logarithmic factors.", "bibtex": "@InProceedings{pmlr-v139-zhang21ab,\n title = \t {Model-Free Reinforcement Learning: from Clipped Pseudo-Regret to Sample Complexity},\n author = {Zhang, Zihan and Zhou, Yuan and Ji, Xiangyang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12653--12662},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21ab/zhang21ab.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21ab.html},\n abstract = \t {In this paper we consider the problem of learning an $\\epsilon$-optimal policy for a discounted Markov Decision Process (MDP). 
Given an MDP with $S$ states, $A$ actions, the discount factor $\\gamma \\in (0,1)$, and an approximation threshold $\\epsilon > 0$, we provide a model-free algorithm to learn an $\\epsilon$-optimal policy with sample complexity $\\tilde{O}(\\frac{SA\\ln(1/p)}{\\epsilon^2(1-\\gamma)^{5.5}})$ \\footnote{In this work, the notation $\\tilde{O}(\\cdot)$ hides poly-logarithmic factors of $S,A,1/(1-\\gamma)$, and $1/\\epsilon$.} and success probability $(1-p)$. For small enough $\\epsilon$, we show an improved algorithm with sample complexity $\\tilde{O}(\\frac{SA\\ln(1/p)}{\\epsilon^2(1-\\gamma)^{3}})$. While the first bound improves upon all known model-free algorithms and model-based ones with tight dependence on $S$, our second algorithm beats all known sample complexity bounds and matches the information theoretic lower bound up to logarithmic factors.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21ab/zhang21ab.pdf", "supp": "", "pdf_size": 503064, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13433339362662011408&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Tsinghua University; University of Illinois Urbana-Champaign; Tsinghua University", "aff_domain": "mails.tsinghua.edu.cn;illinois.edu;tsinghua.edu.cn", "email": "mails.tsinghua.edu.cn;illinois.edu;tsinghua.edu.cn", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zhang21ab.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Tsinghua University;University of Illinois Urbana-Champaign", "aff_unique_dep": ";", "aff_unique_url": "https://www.tsinghua.edu.cn;https://illinois.edu", "aff_unique_abbr": "THU;UIUC", "aff_campus_unique_index": "1", "aff_campus_unique": ";Urbana-Champaign", "aff_country_unique_index": "0;1;0", "aff_country_unique": "China;United States" }, { "title": "Model-Free and Model-Based Policy Evaluation when Causality is Uncertain", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8969", "id": "8969", "proceeding": "http://proceedings.mlr.press/v139/bruns-smith21a.html", "slides": "", "author_site": "David Bruns-Smith", "author": "David A Bruns-Smith", "abstract": "When decision-makers can directly intervene, policy evaluation algorithms give valid causal estimates. In off-policy evaluation (OPE), there may exist unobserved variables that both impact the dynamics and are used by the unknown behavior policy. These \u201cconfounders\u201d will introduce spurious correlations and naive estimates for a new policy will be biased. We develop worst-case bounds to assess sensitivity to these unobserved confounders in finite horizons when confounders are drawn iid each period. We demonstrate that a model-based approach with robust MDPs gives sharper lower bounds by exploiting domain knowledge about the dynamics. 
Finally, we show that when unobserved confounders are persistent over time, OPE is far more difficult and existing techniques produce extremely conservative bounds.", "bibtex": "@InProceedings{pmlr-v139-bruns-smith21a,\n title = \t {Model-Free and Model-Based Policy Evaluation when Causality is Uncertain},\n author = {Bruns-Smith, David A},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1116--1126},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bruns-smith21a/bruns-smith21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bruns-smith21a.html},\n abstract = \t {When decision-makers can directly intervene, policy evaluation algorithms give valid causal estimates. In off-policy evaluation (OPE), there may exist unobserved variables that both impact the dynamics and are used by the unknown behavior policy. These \u201cconfounders\u201d will introduce spurious correlations and naive estimates for a new policy will be biased. We develop worst-case bounds to assess sensitivity to these unobserved confounders in finite horizons when confounders are drawn iid each period. We demonstrate that a model-based approach with robust MDPs gives sharper lower bounds by exploiting domain knowledge about the dynamics. Finally, we show that when unobserved confounders are persistent over time, OPE is far more difficult and existing techniques produce extremely conservative bounds.}\n}", "pdf": "http://proceedings.mlr.press/v139/bruns-smith21a/bruns-smith21a.pdf", "supp": "", "pdf_size": 1728811, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16870291472875022754&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Electrical Engineering and Computer Sciences, University of California, Berkeley, USA", "aff_domain": "berkeley.edu", "email": "berkeley.edu", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v139/bruns-smith21a.html", "aff_unique_index": "0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Department of Electrical Engineering and Computer Sciences", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "title": "Model-Targeted Poisoning Attacks with Provable Convergence", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10349", "id": "10349", "proceeding": "http://proceedings.mlr.press/v139/suya21a.html", "slides": "/media/icml-2021/Slides/10349.pdf", "author_site": "Fnu Suya, Saeed Mahloujifar, Anshuman Suri, David Evans, Yuan Tian", "author": "Fnu Suya; Saeed Mahloujifar; Anshuman Suri; David Evans; Yuan Tian", "abstract": "In a poisoning attack, an adversary who controls a small fraction of the training data attempts to select that data, so a model is induced that misbehaves in a particular way. We consider poisoning attacks against convex machine learning models and propose an efficient poisoning attack designed to induce a model specified by the adversary. Unlike previous model-targeted poisoning attacks, our attack comes with provable convergence to any attainable target model. 
We also provide a lower bound on the minimum number of poisoning points needed to achieve a given target model. Our method uses online convex optimization and finds poisoning points incrementally. This provides more flexibility than previous attacks which require an a priori assumption about the number of poisoning points. Our attack is the first model-targeted poisoning attack that provides provable convergence for convex models. In our experiments, it either exceeds or matches state-of-the-art attacks in terms of attack success rate and distance to the target model.", "bibtex": "@InProceedings{pmlr-v139-suya21a,\n title = \t {Model-Targeted Poisoning Attacks with Provable Convergence},\n author = {Suya, Fnu and Mahloujifar, Saeed and Suri, Anshuman and Evans, David and Tian, Yuan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10000--10010},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/suya21a/suya21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/suya21a.html},\n abstract = \t {In a poisoning attack, an adversary who controls a small fraction of the training data attempts to select that data, so a model is induced that misbehaves in a particular way. We consider poisoning attacks against convex machine learning models and propose an efficient poisoning attack designed to induce a model specified by the adversary. Unlike previous model-targeted poisoning attacks, our attack comes with provable convergence to any attainable target model. We also provide a lower bound on the minimum number of poisoning points needed to achieve a given target model. Our method uses online convex optimization and finds poisoning points incrementally. This provides more flexibility than previous attacks which require an a priori assumption about the number of poisoning points. Our attack is the first model-targeted poisoning attack that provides provable convergence for convex models. 
In our experiments, it either exceeds or matches state-of-the-art attacks in terms of attack success rate and distance to the target model.}\n}", "pdf": "http://proceedings.mlr.press/v139/suya21a/suya21a.pdf", "supp": "", "pdf_size": 407636, "gs_citation": 53, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1651990358981165914&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "University of Virginia; Princeton University; University of Virginia; University of Virginia; University of Virginia", "aff_domain": "virginia.edu;princeton.edu; ; ; ", "email": "virginia.edu;princeton.edu; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/suya21a.html", "aff_unique_index": "0;1;0;0;0", "aff_unique_norm": "University of Virginia;Princeton University", "aff_unique_dep": ";", "aff_unique_url": "https://www.virginia.edu;https://www.princeton.edu", "aff_unique_abbr": "UVA;Princeton", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Model-based Reinforcement Learning for Continuous Control with Posterior Sampling", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9011", "id": "9011", "proceeding": "http://proceedings.mlr.press/v139/fan21b.html", "slides": "", "author_site": "Ying Fan, Yifei Ming", "author": "Ying Fan; Yifei Ming", "abstract": "Balancing exploration and exploitation is crucial in reinforcement learning (RL). In this paper, we study model-based posterior sampling for reinforcement learning (PSRL) in continuous state-action spaces theoretically and empirically. First, we show the first regret bound of PSRL in continuous spaces which is polynomial in the episode length to the best of our knowledge. With the assumption that reward and transition functions can be modeled by Bayesian linear regression, we develop a regret bound of $\\tilde{O}(H^{3/2}d\\sqrt{T})$, where $H$ is the episode length, $d$ is the dimension of the state-action space, and $T$ indicates the total time steps. This result matches the best-known regret bound of non-PSRL methods in linear MDPs. Our bound can be extended to nonlinear cases as well with feature embedding: using linear kernels on the feature representation $\\phi$, the regret bound becomes $\\tilde{O}(H^{3/2}d_{\\phi}\\sqrt{T})$, where $d_\\phi$ is the dimension of the representation space. Moreover, we present MPC-PSRL, a model-based posterior sampling algorithm with model predictive control for action selection. To capture the uncertainty in models, we use Bayesian linear regression on the penultimate layer (the feature representation layer $\\phi$) of neural networks. 
Empirical results show that our algorithm achieves the state-of-the-art sample efficiency in benchmark continuous control tasks compared to prior model-based algorithms, and matches the asymptotic performance of model-free algorithms.", "bibtex": "@InProceedings{pmlr-v139-fan21b,\n title = \t {Model-based Reinforcement Learning for Continuous Control with Posterior Sampling},\n author = {Fan, Ying and Ming, Yifei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3078--3087},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fan21b/fan21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/fan21b.html},\n abstract = \t {Balancing exploration and exploitation is crucial in reinforcement learning (RL). In this paper, we study model-based posterior sampling for reinforcement learning (PSRL) in continuous state-action spaces theoretically and empirically. First, we show the first regret bound of PSRL in continuous spaces which is polynomial in the episode length to the best of our knowledge. With the assumption that reward and transition functions can be modeled by Bayesian linear regression, we develop a regret bound of $\\tilde{O}(H^{3/2}d\\sqrt{T})$, where $H$ is the episode length, $d$ is the dimension of the state-action space, and $T$ indicates the total time steps. This result matches the best-known regret bound of non-PSRL methods in linear MDPs. Our bound can be extended to nonlinear cases as well with feature embedding: using linear kernels on the feature representation $\\phi$, the regret bound becomes $\\tilde{O}(H^{3/2}d_{\\phi}\\sqrt{T})$, where $d_\\phi$ is the dimension of the representation space. Moreover, we present MPC-PSRL, a model-based posterior sampling algorithm with model predictive control for action selection. To capture the uncertainty in models, we use Bayesian linear regression on the penultimate layer (the feature representation layer $\\phi$) of neural networks. 
Empirical results show that our algorithm achieves the state-of-the-art sample efficiency in benchmark continuous control tasks compared to prior model-based algorithms, and matches the asymptotic performance of model-free algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v139/fan21b/fan21b.pdf", "supp": "", "pdf_size": 922505, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9782112597540480270&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "University of Wisconsin-Madison; University of Wisconsin-Madison", "aff_domain": "wisc.edu;wisc.edu", "email": "wisc.edu;wisc.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/fan21b.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Wisconsin-Madison", "aff_unique_dep": "", "aff_unique_url": "https://www.wisc.edu", "aff_unique_abbr": "UW-Madison", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Madison", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Modeling Hierarchical Structures with Continuous Recursive Neural Networks", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8993", "id": "8993", "proceeding": "http://proceedings.mlr.press/v139/chowdhury21a.html", "slides": "/media/icml-2021/Slides/8993.pdf", "author_site": "Jishnu Ray Chowdhury, Cornelia Caragea", "author": "Jishnu Ray Chowdhury; Cornelia Caragea", "abstract": "Recursive Neural Networks (RvNNs), which compose sequences according to their underlying hierarchical syntactic structure, have performed well in several natural language processing tasks compared to similar models without structural biases. However, traditional RvNNs are incapable of inducing the latent structure in a plain text sequence on their own. Several extensions have been proposed to overcome this limitation. Nevertheless, these extensions tend to rely on surrogate gradients or reinforcement learning at the cost of higher bias or variance. In this work, we propose Continuous Recursive Neural Network (CRvNN) as a backpropagation-friendly alternative to address the aforementioned limitations. This is done by incorporating a continuous relaxation to the induced structure. We demonstrate that CRvNN achieves strong performance in challenging synthetic tasks such as logical inference (Bowman et al., 2015b) and ListOps (Nangia & Bowman, 2018). We also show that CRvNN performs comparably or better than prior latent structure models on real-world tasks such as sentiment analysis and natural language inference.", "bibtex": "@InProceedings{pmlr-v139-chowdhury21a,\n title = \t {Modeling Hierarchical Structures with Continuous Recursive Neural Networks},\n author = {Chowdhury, Jishnu Ray and Caragea, Cornelia},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1975--1988},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chowdhury21a/chowdhury21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/chowdhury21a.html},\n abstract = \t {Recursive Neural Networks (RvNNs), which compose sequences according to their underlying hierarchical syntactic structure, have performed well in several natural language processing tasks compared to similar models without structural biases. 
However, traditional RvNNs are incapable of inducing the latent structure in a plain text sequence on their own. Several extensions have been proposed to overcome this limitation. Nevertheless, these extensions tend to rely on surrogate gradients or reinforcement learning at the cost of higher bias or variance. In this work, we propose Continuous Recursive Neural Network (CRvNN) as a backpropagation-friendly alternative to address the aforementioned limitations. This is done by incorporating a continuous relaxation to the induced structure. We demonstrate that CRvNN achieves strong performance in challenging synthetic tasks such as logical inference (Bowman et al., 2015b) and ListOps (Nangia & Bowman, 2018). We also show that CRvNN performs comparably or better than prior latent structure models on real-world tasks such as sentiment analysis and natural language inference.}\n}", "pdf": "http://proceedings.mlr.press/v139/chowdhury21a/chowdhury21a.pdf", "supp": "", "pdf_size": 346990, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12633108093638083396&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Computer Science, University of Illinois at Chicago; Computer Science, University of Illinois at Chicago", "aff_domain": "uic.edu;uic.edu", "email": "uic.edu;uic.edu", "github": "https://github.com/JRC1995/Continuous-RvNN", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/chowdhury21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Illinois at Chicago", "aff_unique_dep": "Computer Science", "aff_unique_url": "https://www.uic.edu", "aff_unique_abbr": "UIC", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Chicago", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Modelling Behavioural Diversity for Learning in Open-Ended Games", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10599", "id": "10599", "proceeding": "http://proceedings.mlr.press/v139/perez-nieves21a.html", "slides": "", "author_site": "Nicolas Perez-Nieves, Yaodong Yang, Oliver Slumbers, David Mguni, Ying Wen, Jun Wang", "author": "Nicolas Perez-Nieves; Yaodong Yang; Oliver Slumbers; David H Mguni; Ying Wen; Jun Wang", "abstract": "Promoting behavioural diversity is critical for solving games with non-transitive dynamics where strategic cycles exist, and there is no consistent winner (e.g., Rock-Paper-Scissors). Yet, there is a lack of rigorous treatment for defining diversity and constructing diversity-aware learning dynamics. In this work, we offer a geometric interpretation of behavioural diversity in games and introduce a novel diversity metric based on \\emph{determinantal point processes} (DPP). By incorporating the diversity metric into best-response dynamics, we develop \\emph{diverse fictitious play} and \\emph{diverse policy-space response oracle} for solving normal-form games and open-ended games. We prove the uniqueness of the diverse best response and the convergence of our algorithms on two-player games. Importantly, we show that maximising the DPP-based diversity metric guarantees to enlarge the \\emph{gamescape} \u2013 convex polytopes spanned by agents\u2019 mixtures of strategies. To validate our diversity-aware solvers, we test on tens of games that show strong non-transitivity. 
Results suggest that our methods achieve at least the same, and in most games, lower exploitability than PSRO solvers by finding effective and diverse strategies.", "bibtex": "@InProceedings{pmlr-v139-perez-nieves21a,\n title = \t {Modelling Behavioural Diversity for Learning in Open-Ended Games},\n author = {Perez-Nieves, Nicolas and Yang, Yaodong and Slumbers, Oliver and Mguni, David H and Wen, Ying and Wang, Jun},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8514--8524},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/perez-nieves21a/perez-nieves21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/perez-nieves21a.html},\n abstract = \t {Promoting behavioural diversity is critical for solving games with non-transitive dynamics where strategic cycles exist, and there is no consistent winner (e.g., Rock-Paper-Scissors). Yet, there is a lack of rigorous treatment for defining diversity and constructing diversity-aware learning dynamics. In this work, we offer a geometric interpretation of behavioural diversity in games and introduce a novel diversity metric based on \\emph{determinantal point processes} (DPP). By incorporating the diversity metric into best-response dynamics, we develop \\emph{diverse fictitious play} and \\emph{diverse policy-space response oracle} for solving normal-form games and open-ended games. We prove the uniqueness of the diverse best response and the convergence of our algorithms on two-player games. Importantly, we show that maximising the DPP-based diversity metric guarantees to enlarge the \\emph{gamescape} \u2013 convex polytopes spanned by agents\u2019 mixtures of strategies. To validate our diversity-aware solvers, we test on tens of games that show strong non-transitivity. Results suggest that our methods achieve at least the same, and in most games, lower exploitability than PSRO solvers by finding effective and diverse strategies.}\n}", "pdf": "http://proceedings.mlr.press/v139/perez-nieves21a/perez-nieves21a.pdf", "supp": "", "pdf_size": 1451956, "gs_citation": 79, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=227312628456814553&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Huawei U.K. + Imperial College London; Huawei U.K. + University College London; University College London; Huawei U.K.; University College London; Huawei U.K. 
+ University College London", "aff_domain": "huawei.com;outlook.com;ucl.ac.uk;huawei.com;ucl.ac.uk;ucl.ac.uk", "email": "huawei.com;outlook.com;ucl.ac.uk;huawei.com;ucl.ac.uk;ucl.ac.uk", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/perez-nieves21a.html", "aff_unique_index": "0+1;0+2;2;0;2;0+2", "aff_unique_norm": "Huawei;Imperial College London;University College London", "aff_unique_dep": "Huawei;;", "aff_unique_url": "https://www.huawei.com/uk;https://www.imperial.ac.uk;https://www.ucl.ac.uk", "aff_unique_abbr": "Huawei;ICL;UCL", "aff_campus_unique_index": ";;", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0+0;0;0;0;0+0", "aff_country_unique": "United Kingdom" }, { "title": "Modularity in Reinforcement Learning via Algorithmic Independence in Credit Assignment", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8697", "id": "8697", "proceeding": "http://proceedings.mlr.press/v139/chang21b.html", "slides": "/media/icml-2021/Slides/8697.pdf", "author_site": "Michael Chang, Sid Kaushik, Sergey Levine, Thomas Griffiths", "author": "Michael Chang; Sid Kaushik; Sergey Levine; Tom Griffiths", "abstract": "Many transfer problems require re-using previously optimal decisions for solving new tasks, which suggests the need for learning algorithms that can modify the mechanisms for choosing certain actions independently of those for choosing others. However, there is currently no formalism nor theory for how to achieve this kind of modular credit assignment. To answer this question, we define modular credit assignment as a constraint on minimizing the algorithmic mutual information among feedback signals for different decisions. We introduce what we call the modularity criterion for testing whether a learning algorithm satisfies this constraint by performing causal analysis on the algorithm itself. We generalize the recently proposed societal decision-making framework as a more granular formalism than the Markov decision process to prove that for decision sequences that do not contain cycles, certain single-step temporal difference action-value methods meet this criterion while all policy-gradient methods do not. Empirical evidence suggests that such action-value methods are more sample efficient than policy-gradient methods on transfer problems that require only sparse changes to a sequence of previously optimal decisions.", "bibtex": "@InProceedings{pmlr-v139-chang21b,\n title = \t {Modularity in Reinforcement Learning via Algorithmic Independence in Credit Assignment},\n author = {Chang, Michael and Kaushik, Sid and Levine, Sergey and Griffiths, Tom},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1452--1462},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chang21b/chang21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/chang21b.html},\n abstract = \t {Many transfer problems require re-using previously optimal decisions for solving new tasks, which suggests the need for learning algorithms that can modify the mechanisms for choosing certain actions independently of those for choosing others. However, there is currently no formalism nor theory for how to achieve this kind of modular credit assignment. 
To answer this question, we define modular credit assignment as a constraint on minimizing the algorithmic mutual information among feedback signals for different decisions. We introduce what we call the modularity criterion for testing whether a learning algorithm satisfies this constraint by performing causal analysis on the algorithm itself. We generalize the recently proposed societal decision-making framework as a more granular formalism than the Markov decision process to prove that for decision sequences that do not contain cycles, certain single-step temporal difference action-value methods meet this criterion while all policy-gradient methods do not. Empirical evidence suggests that such action-value methods are more sample efficient than policy-gradient methods on transfer problems that require only sparse changes to a sequence of previously optimal decisions.}\n}", "pdf": "http://proceedings.mlr.press/v139/chang21b/chang21b.pdf", "supp": "", "pdf_size": 2239218, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15257527092469855212&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Department of Computer Science, University of California, Berkeley, USA; Department of Computer Science, University of California, Berkeley, USA; Department of Computer Science, University of California, Berkeley, USA; Department of Computer Science, Princeton University, USA", "aff_domain": "berkeley.edu; ; ; ", "email": "berkeley.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/chang21b.html", "aff_unique_index": "0;0;0;1", "aff_unique_norm": "University of California, Berkeley;Princeton University", "aff_unique_dep": "Department of Computer Science;Department of Computer Science", "aff_unique_url": "https://www.berkeley.edu;https://www.princeton.edu", "aff_unique_abbr": "UC Berkeley;Princeton", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Berkeley;", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Momentum Residual Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9463", "id": "9463", "proceeding": "http://proceedings.mlr.press/v139/sander21a.html", "slides": "/media/icml-2021/Slides/9463.pdf", "author_site": "Michael Sander, Pierre Ablin, Mathieu Blondel, Gabriel Peyr\u00e9", "author": "Michael E. Sander; Pierre Ablin; Mathieu Blondel; Gabriel Peyr\u00e9", "abstract": "The training of deep residual neural networks (ResNets) with backpropagation has a memory cost that increases linearly with respect to the depth of the network. A simple way to circumvent this issue is to use reversible architectures. In this paper, we propose to change the forward rule of a ResNet by adding a momentum term. The resulting networks, momentum residual neural networks (MomentumNets), are invertible. Unlike previous invertible architectures, they can be used as a drop-in replacement for any existing ResNet block. We show that MomentumNets can be interpreted in the infinitesimal step size regime as second-order ordinary differential equations (ODEs) and exactly characterize how adding momentum progressively increases the representation capabilities of MomentumNets: they can learn any linear mapping up to a multiplicative factor, while ResNets cannot. 
In a learning to optimize setting, where convergence to a fixed point is required, we show theoretically and empirically that our method succeeds while existing invertible architectures fail. We show on CIFAR and ImageNet that MomentumNets have the same accuracy as ResNets, while having a much smaller memory footprint, and show that pre-trained MomentumNets are promising for fine-tuning models.", "bibtex": "@InProceedings{pmlr-v139-sander21a,\n title = \t {Momentum Residual Neural Networks},\n author = {Sander, Michael E. and Ablin, Pierre and Blondel, Mathieu and Peyr{\\'e}, Gabriel},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9276--9287},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sander21a/sander21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/sander21a.html},\n abstract = \t {The training of deep residual neural networks (ResNets) with backpropagation has a memory cost that increases linearly with respect to the depth of the network. A simple way to circumvent this issue is to use reversible architectures. In this paper, we propose to change the forward rule of a ResNet by adding a momentum term. The resulting networks, momentum residual neural networks (MomentumNets), are invertible. Unlike previous invertible architectures, they can be used as a drop-in replacement for any existing ResNet block. We show that MomentumNets can be interpreted in the infinitesimal step size regime as second-order ordinary differential equations (ODEs) and exactly characterize how adding momentum progressively increases the representation capabilities of MomentumNets: they can learn any linear mapping up to a multiplicative factor, while ResNets cannot. In a learning to optimize setting, where convergence to a fixed point is required, we show theoretically and empirically that our method succeeds while existing invertible architectures fail. 
We show on CIFAR and ImageNet that MomentumNets have the same accuracy as ResNets, while having a much smaller memory footprint, and show that pre-trained MomentumNets are promising for fine-tuning models.}\n}", "pdf": "http://proceedings.mlr.press/v139/sander21a/sander21a.pdf", "supp": "", "pdf_size": 2997596, "gs_citation": 75, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=195539269682246494&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Ecole Normale Sup\u00e9rieure, DMA, Paris, France+CNRS, France; Ecole Normale Sup\u00e9rieure, DMA, Paris, France+CNRS, France; Google Research, Brain team; Ecole Normale Sup\u00e9rieure, DMA, Paris, France+CNRS, France", "aff_domain": "ens.fr;ens.fr;google.com;ens.fr", "email": "ens.fr;ens.fr;google.com;ens.fr", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/sander21a.html", "aff_unique_index": "0+1;0+1;2;0+1", "aff_unique_norm": "Ecole Normale Sup\u00e9rieure;CNRS;Google", "aff_unique_dep": "DMA;;Google Research", "aff_unique_url": "https://www.ens.fr;https://www.cnrs.fr;https://research.google", "aff_unique_abbr": "ENS;CNRS;Google", "aff_campus_unique_index": "0;0;2;0", "aff_campus_unique": "Paris;;Mountain View", "aff_country_unique_index": "0+0;0+0;1;0+0", "aff_country_unique": "France;United States" }, { "title": "Monotonic Robust Policy Optimization with Model Discrepancy", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10253", "id": "10253", "proceeding": "http://proceedings.mlr.press/v139/jiang21c.html", "slides": "", "author_site": "yuankun jiang, Chenglin Li, Wenrui Dai, Junni Zou, Hongkai Xiong", "author": "Yuankun Jiang; Chenglin Li; Wenrui Dai; Junni Zou; Hongkai Xiong", "abstract": "State-of-the-art deep reinforcement learning (DRL) algorithms tend to overfit due to the model discrepancy between source and target environments. Though applying domain randomization during training can improve the average performance by randomly generating a sufficient diversity of environments in simulator, the worst-case environment is still neglected without any performance guarantee. Since the average and worst-case performance are both important for generalization in RL, in this paper, we propose a policy optimization approach for concurrently improving the policy\u2019s performance in the average and worst-case environment. We theoretically derive a lower bound for the worst-case performance of a given policy by relating it to the expected performance. Guided by this lower bound, we formulate an optimization problem to jointly optimize the policy and sampling distribution, and prove that by iteratively solving it the worst-case performance is monotonically improved. We then develop a practical algorithm, named monotonic robust policy optimization (MRPO). 
Experimental evaluations in several robot control tasks demonstrate that MRPO can generally improve both the average and worst-case performance in the source environments for training, and facilitate in all cases the learned policy with a better generalization capability in some unseen testing environments.", "bibtex": "@InProceedings{pmlr-v139-jiang21c,\n title = \t {Monotonic Robust Policy Optimization with Model Discrepancy},\n author = {Jiang, Yuankun and Li, Chenglin and Dai, Wenrui and Zou, Junni and Xiong, Hongkai},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4951--4960},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jiang21c/jiang21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/jiang21c.html},\n abstract = \t {State-of-the-art deep reinforcement learning (DRL) algorithms tend to overfit due to the model discrepancy between source and target environments. Though applying domain randomization during training can improve the average performance by randomly generating a sufficient diversity of environments in simulator, the worst-case environment is still neglected without any performance guarantee. Since the average and worst-case performance are both important for generalization in RL, in this paper, we propose a policy optimization approach for concurrently improving the policy\u2019s performance in the average and worst-case environment. We theoretically derive a lower bound for the worst-case performance of a given policy by relating it to the expected performance. Guided by this lower bound, we formulate an optimization problem to jointly optimize the policy and sampling distribution, and prove that by iteratively solving it the worst-case performance is monotonically improved. We then develop a practical algorithm, named monotonic robust policy optimization (MRPO). 
Experimental evaluations in several robot control tasks demonstrate that MRPO can generally improve both the average and worst-case performance in the source environments for training, and facilitate in all cases the learned policy with a better generalization capability in some unseen testing environments.}\n}", "pdf": "http://proceedings.mlr.press/v139/jiang21c/jiang21c.pdf", "supp": "", "pdf_size": 2692296, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12281009890386859927&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China; Department of Electronic Engineering, Shanghai Jiao Tong University, Shanghai, China; Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China; Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China; Department of Electronic Engineering, Shanghai Jiao Tong University, Shanghai, China", "aff_domain": "sjtu.edu.cn;sjtu.edu.cn; ;cs.sjtu.edu.cn; ", "email": "sjtu.edu.cn;sjtu.edu.cn; ;cs.sjtu.edu.cn; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/jiang21c.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Shanghai Jiao Tong University", "aff_unique_dep": "Department of Computer Science and Engineering", "aff_unique_url": "https://www.sjtu.edu.cn", "aff_unique_abbr": "SJTU", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Shanghai", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "China" }, { "title": "Monte Carlo Variational Auto-Encoders", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8527", "id": "8527", "proceeding": "http://proceedings.mlr.press/v139/thin21a.html", "slides": "", "author_site": "Achille Thin, Nikita Kotelevskii, Arnaud Doucet, Alain Durmus, Eric Moulines, Maxim Panov", "author": "Achille Thin; Nikita Kotelevskii; Arnaud Doucet; Alain Durmus; Eric Moulines; Maxim Panov", "abstract": "Variational auto-encoders (VAE) are popular deep latent variable models which are trained by maximizing an Evidence Lower Bound (ELBO). To obtain tighter ELBO and hence better variational approximations, it has been proposed to use importance sampling to get a lower variance estimate of the evidence. However, importance sampling is known to perform poorly in high dimensions. While it has been suggested many times in the literature to use more sophisticated algorithms such as Annealed Importance Sampling (AIS) and its Sequential Importance Sampling (SIS) extensions, the potential benefits brought by these advanced techniques have never been realized for VAE: the AIS estimate cannot be easily differentiated, while SIS requires the specification of carefully chosen backward Markov kernels. 
In this paper, we address both issues and demonstrate the performance of the resulting Monte Carlo VAEs on a variety of applications.", "bibtex": "@InProceedings{pmlr-v139-thin21a,\n title = \t {Monte Carlo Variational Auto-Encoders},\n author = {Thin, Achille and Kotelevskii, Nikita and Doucet, Arnaud and Durmus, Alain and Moulines, Eric and Panov, Maxim},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10247--10257},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/thin21a/thin21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/thin21a.html},\n abstract = \t {Variational auto-encoders (VAE) are popular deep latent variable models which are trained by maximizing an Evidence Lower Bound (ELBO). To obtain tighter ELBO and hence better variational approximations, it has been proposed to use importance sampling to get a lower variance estimate of the evidence. However, importance sampling is known to perform poorly in high dimensions. While it has been suggested many times in the literature to use more sophisticated algorithms such as Annealed Importance Sampling (AIS) and its Sequential Importance Sampling (SIS) extensions, the potential benefits brought by these advanced techniques have never been realized for VAE: the AIS estimate cannot be easily differentiated, while SIS requires the specification of carefully chosen backward Markov kernels. In this paper, we address both issues and demonstrate the performance of the resulting Monte Carlo VAEs on a variety of applications.}\n}", "pdf": "http://proceedings.mlr.press/v139/thin21a/thin21a.pdf", "supp": "", "pdf_size": 864963, "gs_citation": 53, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=589845268802830825&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "CMAP, Ecole Polytechnique, Universite Paris-Saclay, France; CDISE, Skolkovo Institute of Science and Technology, Moscow, Russia; Ecole Nationale Superieure Paris-Saclay, France; CDISE, Skolkovo Institute of Science and Technology, Moscow, Russia + HDI Lab, HSE University, Moscow, Russia; CMAP, Ecole Polytechnique, Universite Paris-Saclay, France + HDI Lab, HSE University, Moscow, Russia; University of Oxford", "aff_domain": "polytechnique.edu; ; ; ; ; ", "email": "polytechnique.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/thin21a.html", "aff_unique_index": "0;1;2;1+3;0+3;4", "aff_unique_norm": "Ecole Polytechnique;Skolkovo Institute of Science and Technology;Ecole Nationale Superieure Paris-Saclay;HSE University;University of Oxford", "aff_unique_dep": "CMAP;CDISE;;HDI Lab;", "aff_unique_url": "https://www.polytechnique.edu;https://www.skoltech.ru;https://ensparis-saclay.fr;https://hse.ru;https://www.ox.ac.uk", "aff_unique_abbr": "Polytechnique;Skoltech;ENS Paris-Saclay;;Oxford", "aff_campus_unique_index": "1;1+1;1", "aff_campus_unique": ";Moscow", "aff_country_unique_index": "0;1;0;1+1;0+1;2", "aff_country_unique": "France;Russian Federation;United Kingdom" }, { "title": "More Powerful and General Selective Inference for Stepwise Feature Selection using Homotopy Method", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8547", "id": "8547", "proceeding": "http://proceedings.mlr.press/v139/sugiyama21a.html", "slides": 
"", "author_site": "Kazuya Sugiyama, Vo Nguyen Le Duy, Ichiro Takeuchi", "author": "Kazuya Sugiyama; Vo Nguyen Le Duy; Ichiro Takeuchi", "abstract": "Conditional selective inference (SI) has been actively studied as a new statistical inference framework for data-driven hypotheses. The basic idea of conditional SI is to make inferences conditional on the selection event characterized by a set of linear and/or quadratic inequalities. Conditional SI has been mainly studied in the context of feature selection such as stepwise feature selection (SFS). The main limitation of the existing conditional SI methods is the loss of power due to over-conditioning, which is required for computational tractability. In this study, we develop a more powerful and general conditional SI method for SFS using the homotopy method which enables us to overcome this limitation. The homotopy-based SI is especially effective for more complicated feature selection algorithms. As an example, we develop a conditional SI method for forward-backward SFS with AIC-based stopping criteria and show that it is not adversely affected by the increased complexity of the algorithm. We conduct several experiments to demonstrate the effectiveness and efficiency of the proposed method.", "bibtex": "@InProceedings{pmlr-v139-sugiyama21a,\n title = \t {More Powerful and General Selective Inference for Stepwise Feature Selection using Homotopy Method},\n author = {Sugiyama, Kazuya and Duy, Vo Nguyen Le and Takeuchi, Ichiro},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9891--9901},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sugiyama21a/sugiyama21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/sugiyama21a.html},\n abstract = \t {Conditional selective inference (SI) has been actively studied as a new statistical inference framework for data-driven hypotheses. The basic idea of conditional SI is to make inferences conditional on the selection event characterized by a set of linear and/or quadratic inequalities. Conditional SI has been mainly studied in the context of feature selection such as stepwise feature selection (SFS). The main limitation of the existing conditional SI methods is the loss of power due to over-conditioning, which is required for computational tractability. In this study, we develop a more powerful and general conditional SI method for SFS using the homotopy method which enables us to overcome this limitation. The homotopy-based SI is especially effective for more complicated feature selection algorithms. As an example, we develop a conditional SI method for forward-backward SFS with AIC-based stopping criteria and show that it is not adversely affected by the increased complexity of the algorithm. 
We conduct several experiments to demonstrate the effectiveness and efficiency of the proposed method.}\n}", "pdf": "http://proceedings.mlr.press/v139/sugiyama21a/sugiyama21a.pdf", "supp": "", "pdf_size": 1337848, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16505950380721964907&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Nagoya Institute of Technology, Japan + RIKEN, Japan; Nagoya Institute of Technology, Japan + RIKEN, Japan; Nagoya Institute of Technology, Japan + RIKEN, Japan", "aff_domain": "nitech.ac.jp;nitech.ac.jp;nitech.ac.jp", "email": "nitech.ac.jp;nitech.ac.jp;nitech.ac.jp", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/sugiyama21a.html", "aff_unique_index": "0+1;0+1;0+1", "aff_unique_norm": "Nagoya Institute of Technology;RIKEN", "aff_unique_dep": ";", "aff_unique_url": "https://www.nitech.ac.jp;https://www.riken.jp", "aff_unique_abbr": "NIT;RIKEN", "aff_campus_unique_index": ";;", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0+0;0+0", "aff_country_unique": "Japan" }, { "title": "Moreau-Yosida $f$-divergences", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10293", "id": "10293", "proceeding": "http://proceedings.mlr.press/v139/terjek21a.html", "slides": "/media/icml-2021/Slides/10293.pdf", "author": "D\u00e1vid Terj\u00e9k", "abstract": "Variational representations of $f$-divergences are central to many machine learning algorithms, with Lipschitz constrained variants recently gaining attention. Inspired by this, we define the Moreau-Yosida approximation of $f$-divergences with respect to the Wasserstein-$1$ metric. The corresponding variational formulas provide a generalization of a number of recent results, novel special cases of interest and a relaxation of the hard Lipschitz constraint. Additionally, we prove that the so-called tight variational representation of $f$-divergences can be taken over the quotient space of Lipschitz functions, and give a characterization of functions achieving the supremum in the variational representation. On the practical side, we propose an algorithm to calculate the tight convex conjugate of $f$-divergences compatible with automatic differentiation frameworks. As an application of our results, we propose the Moreau-Yosida $f$-GAN, providing an implementation of the variational formulas for the Kullback-Leibler, reverse Kullback-Leibler, $\\chi^2$, reverse $\\chi^2$, squared Hellinger, Jensen-Shannon, Jeffreys, triangular discrimination and total variation divergences as GANs trained on CIFAR-10, leading to competitive results and a simple solution to the problem of uniqueness of the optimal critic.", "bibtex": "@InProceedings{pmlr-v139-terjek21a,\n title = \t {Moreau-Yosida $f$-divergences},\n author = {Terj{\\'e}k, D{\\'a}vid},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10214--10224},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/terjek21a/terjek21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/terjek21a.html},\n abstract = \t {Variational representations of $f$-divergences are central to many machine learning algorithms, with Lipschitz constrained variants recently gaining attention. 
Inspired by this, we define the Moreau-Yosida approximation of $f$-divergences with respect to the Wasserstein-$1$ metric. The corresponding variational formulas provide a generalization of a number of recent results, novel special cases of interest and a relaxation of the hard Lipschitz constraint. Additionally, we prove that the so-called tight variational representation of $f$-divergences can be taken over the quotient space of Lipschitz functions, and give a characterization of functions achieving the supremum in the variational representation. On the practical side, we propose an algorithm to calculate the tight convex conjugate of $f$-divergences compatible with automatic differentiation frameworks. As an application of our results, we propose the Moreau-Yosida $f$-GAN, providing an implementation of the variational formulas for the Kullback-Leibler, reverse Kullback-Leibler, $\\chi^2$, reverse $\\chi^2$, squared Hellinger, Jensen-Shannon, Jeffreys, triangular discrimination and total variation divergences as GANs trained on CIFAR-10, leading to competitive results and a simple solution to the problem of uniqueness of the optimal critic.}\n}", "pdf": "http://proceedings.mlr.press/v139/terjek21a/terjek21a.pdf", "supp": "", "pdf_size": 421158, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3652869154522690970&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Alfr\u00e9d R\u00e9nyi Institute of Mathematics, Budapest, Hungary", "aff_domain": "renyi.hu", "email": "renyi.hu", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v139/terjek21a.html", "aff_unique_index": "0", "aff_unique_norm": "Alfr\u00e9d R\u00e9nyi Institute of Mathematics", "aff_unique_dep": "Institute of Mathematics", "aff_unique_url": "https://www.renyi.hu", "aff_unique_abbr": "R\u00e9nyi Institute", "aff_country_unique_index": "0", "aff_country_unique": "Hungary" }, { "title": "MorphVAE: Generating Neural Morphologies from 3D-Walks using a Variational Autoencoder with Spherical Latent Space", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10059", "id": "10059", "proceeding": "http://proceedings.mlr.press/v139/laturnus21a.html", "slides": "", "author_site": "Sophie Laturnus, Philipp Berens", "author": "Sophie C. Laturnus; Philipp Berens", "abstract": "For the past century, the anatomy of a neuron has been considered one of its defining features: The shape of a neuron\u2019s dendrites and axon fundamentally determines what other neurons it can connect to. These neurites have been described using mathematical tools e.g. in the context of cell type classification, but generative models of these structures have only rarely been proposed and are often computationally inefficient. Here we propose MorphVAE, a sequence-to-sequence variational autoencoder with spherical latent space as a generative model for neural morphologies. The model operates on walks within the tree structure of a neuron and can incorporate expert annotations on a subset of the data using semi-supervised learning. We develop our model on artificially generated toy data and evaluate its performance on dendrites of excitatory cells and axons of inhibitory cells of mouse motor cortex (M1) and dendrites of retinal ganglion cells. We show that the learned latent feature space allows for better cell type discrimination than other commonly used features. 
By sampling new walks from the latent space we can easily construct new morphologies with a specified degree of similarity to their reference neuron, providing an efficient generative model for neural morphologies.", "bibtex": "@InProceedings{pmlr-v139-laturnus21a,\n title = \t {MorphVAE: Generating Neural Morphologies from 3D-Walks using a Variational Autoencoder with Spherical Latent Space},\n author = {Laturnus, Sophie C. and Berens, Philipp},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6021--6031},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/laturnus21a/laturnus21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/laturnus21a.html},\n abstract = \t {For the past century, the anatomy of a neuron has been considered one of its defining features: The shape of a neuron\u2019s dendrites and axon fundamentally determines what other neurons it can connect to. These neurites have been described using mathematical tools e.g. in the context of cell type classification, but generative models of these structures have only rarely been proposed and are often computationally inefficient. Here we propose MorphVAE, a sequence-to-sequence variational autoencoder with spherical latent space as a generative model for neural morphologies. The model operates on walks within the tree structure of a neuron and can incorporate expert annotations on a subset of the data using semi-supervised learning. We develop our model on artificially generated toy data and evaluate its performance on dendrites of excitatory cells and axons of inhibitory cells of mouse motor cortex (M1) and dendrites of retinal ganglion cells. We show that the learned latent feature space allows for better cell type discrimination than other commonly used features. 
By sampling new walks from the latent space we can easily construct new morphologies with a specified degree of similarity to their reference neuron, providing an efficient generative model for neural morphologies.}\n}", "pdf": "http://proceedings.mlr.press/v139/laturnus21a/laturnus21a.pdf", "supp": "", "pdf_size": 7959831, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=520974170624884160&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Institute for Ophthalmic Research, University of T\u00fcbingen, Germany+Center for Integrative Neuroscience, University of T\u00fcbingen, Germany+T\u00fcbingen AI Center, Germany; Institute for Ophthalmic Research, University of T\u00fcbingen, Germany+Center for Integrative Neuroscience, University of T\u00fcbingen, Germany+T\u00fcbingen AI Center, Germany", "aff_domain": "uni-tuebingen.de;uni-tuebingen.de", "email": "uni-tuebingen.de;uni-tuebingen.de", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/laturnus21a.html", "aff_unique_index": "0+0+1;0+0+1", "aff_unique_norm": "University of T\u00fcbingen;T\u00fcbingen AI Center", "aff_unique_dep": "Institute for Ophthalmic Research;AI Center", "aff_unique_url": "https://www.uni-tuebingen.de;", "aff_unique_abbr": ";", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+0+0;0+0+0", "aff_country_unique": "Germany" }, { "title": "Muesli: Combining Improvements in Policy Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10769", "id": "10769", "proceeding": "http://proceedings.mlr.press/v139/hessel21a.html", "slides": "/media/icml-2021/Slides/10769.pdf", "author_site": "Matteo Hessel, Ivo Danihelka, Fabio Viola, Arthur Guez, Simon Schmitt, Laurent Sifre, Theophane Weber, David Silver, Hado van Hasselt", "author": "Matteo Hessel; Ivo Danihelka; Fabio Viola; Arthur Guez; Simon Schmitt; Laurent Sifre; Theophane Weber; David Silver; Hado Van Hasselt", "abstract": "We propose a novel policy update that combines regularized policy optimization with model learning as an auxiliary loss. The update (henceforth Muesli) matches MuZero\u2019s state-of-the-art performance on Atari. Notably, Muesli does so without using deep search: it acts directly with a policy network and has computation speed comparable to model-free baselines. The Atari results are complemented by extensive ablations, and by additional results on continuous control and 9x9 Go.", "bibtex": "@InProceedings{pmlr-v139-hessel21a,\n title = \t {Muesli: Combining Improvements in Policy Optimization},\n author = {Hessel, Matteo and Danihelka, Ivo and Viola, Fabio and Guez, Arthur and Schmitt, Simon and Sifre, Laurent and Weber, Theophane and Silver, David and Van Hasselt, Hado},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4214--4226},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hessel21a/hessel21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hessel21a.html},\n abstract = \t {We propose a novel policy update that combines regularized policy optimization with model learning as an auxiliary loss. The update (henceforth Muesli) matches MuZero\u2019s state-of-the-art performance on Atari. 
Notably, Muesli does so without using deep search: it acts directly with a policy network and has computation speed comparable to model-free baselines. The Atari results are complemented by extensive ablations, and by additional results on continuous control and 9x9 Go.}\n}", "pdf": "http://proceedings.mlr.press/v139/hessel21a/hessel21a.pdf", "supp": "", "pdf_size": 994052, "gs_citation": 92, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14805934246707658240&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "DeepMind, London, UK; DeepMind, London, UK + University College London; DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK + University College London; DeepMind, London, UK", "aff_domain": "google.com;google.com; ; ; ; ; ;google.com; ", "email": "google.com;google.com; ; ; ; ; ;google.com; ", "github": "", "project": "", "author_num": 9, "oa": "https://proceedings.mlr.press/v139/hessel21a.html", "aff_unique_index": "0;0+1;0;0;0;0;0;0+1;0", "aff_unique_norm": "DeepMind;University College London", "aff_unique_dep": ";", "aff_unique_url": "https://deepmind.com;https://www.ucl.ac.uk", "aff_unique_abbr": "DeepMind;UCL", "aff_campus_unique_index": "0;0;0;0;0;0;0;0;0", "aff_campus_unique": "London;", "aff_country_unique_index": "0;0+0;0;0;0;0;0;0+0;0", "aff_country_unique": "United Kingdom" }, { "title": "Multi-Agent Training beyond Zero-Sum with Correlated Equilibrium Meta-Solvers", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10003", "id": "10003", "proceeding": "http://proceedings.mlr.press/v139/marris21a.html", "slides": "/media/icml-2021/Slides/10003.pdf", "author_site": "Luke Marris, Paul Muller, Marc Lanctot, Karl Tuyls, Thore Graepel", "author": "Luke Marris; Paul Muller; Marc Lanctot; Karl Tuyls; Thore Graepel", "abstract": "Two-player, constant-sum games are well studied in the literature, but there has been limited progress outside of this setting. We propose Joint Policy-Space Response Oracles (JPSRO), an algorithm for training agents in n-player, general-sum extensive form games, which provably converges to an equilibrium. We further suggest correlated equilibria (CE) as promising meta-solvers, and propose a novel solution concept Maximum Gini Correlated Equilibrium (MGCE), a principled and computationally efficient family of solutions for solving the correlated equilibrium selection problem. We conduct several experiments using CE meta-solvers for JPSRO and demonstrate convergence on n-player, general-sum games.", "bibtex": "@InProceedings{pmlr-v139-marris21a,\n title = \t {Multi-Agent Training beyond Zero-Sum with Correlated Equilibrium Meta-Solvers},\n author = {Marris, Luke and Muller, Paul and Lanctot, Marc and Tuyls, Karl and Graepel, Thore},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7480--7491},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/marris21a/marris21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/marris21a.html},\n abstract = \t {Two-player, constant-sum games are well studied in the literature, but there has been limited progress outside of this setting. 
We propose Joint Policy-Space Response Oracles (JPSRO), an algorithm for training agents in n-player, general-sum extensive form games, which provably converges to an equilibrium. We further suggest correlated equilibria (CE) as promising meta-solvers, and propose a novel solution concept Maximum Gini Correlated Equilibrium (MGCE), a principled and computationally efficient family of solutions for solving the correlated equilibrium selection problem. We conduct several experiments using CE meta-solvers for JPSRO and demonstrate convergence on n-player, general-sum games.}\n}", "pdf": "http://proceedings.mlr.press/v139/marris21a/marris21a.pdf", "supp": "", "pdf_size": 2248022, "gs_citation": 47, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13991149676180937828&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "DeepMind+University College London; DeepMind+Universit\u00e9 Gustave Eiffel; DeepMind; DeepMind; DeepMind+University College London", "aff_domain": "google.com; ; ; ; ", "email": "google.com; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/marris21a.html", "aff_unique_index": "0+1;0+2;0;0;0+1", "aff_unique_norm": "DeepMind;University College London;Universit\u00e9 Gustave Eiffel", "aff_unique_dep": ";;", "aff_unique_url": "https://deepmind.com;https://www.ucl.ac.uk;https://www.univ-gustave-eiffel.fr", "aff_unique_abbr": "DeepMind;UCL;UGE", "aff_campus_unique_index": ";;", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0+1;0;0;0+0", "aff_country_unique": "United Kingdom;France" }, { "title": "Multi-Dimensional Classification via Sparse Label Encoding", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10561", "id": "10561", "proceeding": "http://proceedings.mlr.press/v139/jia21c.html", "slides": "/media/icml-2021/Slides/10561_wNcK8R2.pdf", "author_site": "BINBIN JIA, Min-Ling Zhang", "author": "Bin-Bin Jia; Min-Ling Zhang", "abstract": "In multi-dimensional classification (MDC), there are multiple class variables in the output space with each of them corresponding to one heterogeneous class space. Due to the heterogeneity of class spaces, it is quite challenging to consider the dependencies among class variables when learning from MDC examples. In this paper, we propose a novel MDC approach named SLEM which learns the predictive model in an encoded label space instead of the original heterogeneous one. Specifically, SLEM works in an encoding-training-decoding framework. In the encoding phase, each class vector is mapped into a real-valued one via three cascaded operations including pairwise grouping, one-hot conversion and sparse linear encoding. In the training phase, a multi-output regression model is learned within the encoded label space. In the decoding phase, the predicted class vector is obtained by adapting orthogonal matching pursuit over outputs of the learned multi-output regression model. 
Experimental results clearly validate the superiority of SLEM against state-of-the-art MDC approaches.", "bibtex": "@InProceedings{pmlr-v139-jia21c,\n title = \t {Multi-Dimensional Classification via Sparse Label Encoding},\n author = {Jia, Bin-Bin and Zhang, Min-Ling},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4917--4926},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jia21c/jia21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/jia21c.html},\n abstract = \t {In multi-dimensional classification (MDC), there are multiple class variables in the output space with each of them corresponding to one heterogeneous class space. Due to the heterogeneity of class spaces, it is quite challenging to consider the dependencies among class variables when learning from MDC examples. In this paper, we propose a novel MDC approach named SLEM which learns the predictive model in an encoded label space instead of the original heterogeneous one. Specifically, SLEM works in an encoding-training-decoding framework. In the encoding phase, each class vector is mapped into a real-valued one via three cascaded operations including pairwise grouping, one-hot conversion and sparse linear encoding. In the training phase, a multi-output regression model is learned within the encoded label space. In the decoding phase, the predicted class vector is obtained by adapting orthogonal matching pursuit over outputs of the learned multi-output regression model. Experimental results clearly validate the superiority of SLEM against state-of-the-art MDC approaches.}\n}", "pdf": "http://proceedings.mlr.press/v139/jia21c/jia21c.pdf", "supp": "", "pdf_size": 454112, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12399944475074503332&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "School of Computer Science and Engineering, Southeast University, Nanjing 210096, China+College of Electrical and Information Engineering, Lanzhou University of Technology, Lanzhou 730050, China; Key Lab. of Computer Network and Information Integration (Southeast University), Ministry of Education, China", "aff_domain": "seu.edu.cn; ", "email": "seu.edu.cn; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/jia21c.html", "aff_unique_index": "0+1;0", "aff_unique_norm": "Southeast University;Lanzhou University of Technology", "aff_unique_dep": "School of Computer Science and Engineering;College of Electrical and Information Engineering", "aff_unique_url": "https://www.seu.edu.cn/;", "aff_unique_abbr": "SEU;", "aff_campus_unique_index": "0+1", "aff_campus_unique": "Nanjing;Lanzhou;", "aff_country_unique_index": "0+0;0", "aff_country_unique": "China" }, { "title": "Multi-Receiver Online Bayesian Persuasion", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9107", "id": "9107", "proceeding": "http://proceedings.mlr.press/v139/castiglioni21a.html", "slides": "", "author_site": "Matteo Castiglioni, Alberto Marchesi, Andrea Celli, Nicola Gatti", "author": "Matteo Castiglioni; Alberto Marchesi; Andrea Celli; Nicola Gatti", "abstract": "Bayesian persuasion studies how an informed sender should partially disclose information to influence the behavior of a self-interested receiver. 
Classical models make the stringent assumption that the sender knows the receiver\u2019s utility. This can be relaxed by considering an online learning framework in which the sender repeatedly faces a receiver of an unknown, adversarially selected type. We study, for the first time, an online Bayesian persuasion setting with multiple receivers. We focus on the case with no externalities and binary actions, as customary in offline models. Our goal is to design no-regret algorithms for the sender with polynomial per-iteration running time. First, we prove a negative result: for any 0 < $\\alpha$ $\\leq$ 1, there is no polynomial-time no-$\\alpha$-regret algorithm when the sender\u2019s utility function is supermodular or anonymous. Then, we focus on the setting of submodular sender\u2019s utility functions and we show that, in this case, it is possible to design a polynomial-time no-(1-1/e)-regret algorithm. To do so, we introduce a general online gradient descent framework to handle online learning problems with a finite number of possible loss functions. This requires the existence of an approximate projection oracle. We show that, in our setting, there exists one such projection oracle which can be implemented in polynomial time.", "bibtex": "@InProceedings{pmlr-v139-castiglioni21a,\n title = \t {Multi-Receiver Online Bayesian Persuasion},\n author = {Castiglioni, Matteo and Marchesi, Alberto and Celli, Andrea and Gatti, Nicola},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1314--1323},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/castiglioni21a/castiglioni21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/castiglioni21a.html},\n abstract = \t {Bayesian persuasion studies how an informed sender should partially disclose information to influence the behavior of a self-interested receiver. Classical models make the stringent assumption that the sender knows the receiver\u2019s utility. This can be relaxed by considering an online learning framework in which the sender repeatedly faces a receiver of an unknown, adversarially selected type. We study, for the first time, an online Bayesian persuasion setting with multiple receivers. We focus on the case with no externalities and binary actions, as customary in offline models. Our goal is to design no-regret algorithms for the sender with polynomial per-iteration running time. First, we prove a negative result: for any 0 < $\\alpha$ $\\leq$ 1, there is no polynomial-time no-$\\alpha$-regret algorithm when the sender\u2019s utility function is supermodular or anonymous. Then, we focus on the setting of submodular sender\u2019s utility functions and we show that, in this case, it is possible to design a polynomial-time no-(1-1/e)-regret algorithm. To do so, we introduce a general online gradient descent framework to handle online learning problems with a finite number of possible loss functions. This requires the existence of an approximate projection oracle. 
We show that, in our setting, there exists one such projection oracle which can be implemented in polynomial time.}\n}", "pdf": "http://proceedings.mlr.press/v139/castiglioni21a/castiglioni21a.pdf", "supp": "", "pdf_size": 413297, "gs_citation": 51, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15541335455208985813&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Politecnico di Milano; Politecnico di Milano; Politecnico di Milano; Politecnico di Milano", "aff_domain": "polimi.it; ; ; ", "email": "polimi.it; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/castiglioni21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Politecnico di Milano", "aff_unique_dep": "", "aff_unique_url": "https://www.polimi.it", "aff_unique_abbr": "Polimi", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Italy" }, { "title": "Multi-Task Reinforcement Learning with Context-based Representations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10101", "id": "10101", "proceeding": "http://proceedings.mlr.press/v139/sodhani21a.html", "slides": "", "author_site": "Shagun Sodhani, Amy Zhang, Joelle Pineau", "author": "Shagun Sodhani; Amy Zhang; Joelle Pineau", "abstract": "https://drive.google.com/file/d/1lRV72XaKoxZjgQrLXBJhsM82x54_1Vc4/view?usp=sharing", "bibtex": "@InProceedings{pmlr-v139-sodhani21a,\n title = \t {Multi-Task Reinforcement Learning with Context-based Representations},\n author = {Sodhani, Shagun and Zhang, Amy and Pineau, Joelle},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9767--9779},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sodhani21a/sodhani21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/sodhani21a.html},\n abstract = \t {https://drive.google.com/file/d/1lRV72XaKoxZjgQrLXBJhsM82x54_1Vc4/view?usp=sharing}\n}", "pdf": "http://proceedings.mlr.press/v139/sodhani21a/sodhani21a.pdf", "supp": "", "pdf_size": 1719536, "gs_citation": 242, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8978263240984002856&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Facebook AI Research + Mila + McGill University; Facebook AI Research + Mila + McGill University; Facebook AI Research + Mila + McGill University", "aff_domain": "fb.com; ; ", "email": "fb.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/sodhani21a.html", "aff_unique_index": "0+1+2;0+1+2;0+1+2", "aff_unique_norm": "Meta;Mila;McGill University", "aff_unique_dep": "Facebook AI Research;Quebec Artificial Intelligence Institute;", "aff_unique_url": "https://research.facebook.com;https://mila.quebec;https://www.mcgill.ca", "aff_unique_abbr": "FAIR;Mila;McGill", "aff_campus_unique_index": ";;", "aff_campus_unique": "", "aff_country_unique_index": "0+1+1;0+1+1;0+1+1", "aff_country_unique": "United States;Canada" }, { "title": "Multi-group Agnostic PAC Learnability", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10535", "id": "10535", "proceeding": "http://proceedings.mlr.press/v139/rothblum21a.html", "slides": "", "author_site": "Guy Rothblum, Gal Yona", "author": "Guy N Rothblum; Gal Yona", 
"abstract": "An agnostic PAC learning algorithm finds a predictor that is competitive with the best predictor in a benchmark hypothesis class, where competitiveness is measured with respect to a given loss function. However, its predictions might be quite sub-optimal for structured subgroups of individuals, such as protected demographic groups. Motivated by such fairness concerns, we study \u201cmulti-group agnostic PAC learnability\u201d: fixing a measure of loss, a benchmark class $\\H$ and a (potentially) rich collection of subgroups $\\G$, the objective is to learn a single predictor such that the loss experienced by every group $g \\in \\G$ is not much larger than the best possible loss for this group within $\\H$. Under natural conditions, we provide a characterization of the loss functions for which such a predictor is guaranteed to exist. For any such loss function we construct a learning algorithm whose sample complexity is logarithmic in the size of the collection $\\G$. Our results unify and extend previous positive and negative results from the multi-group fairness literature, which applied for specific loss functions.", "bibtex": "@InProceedings{pmlr-v139-rothblum21a,\n title = \t {Multi-group Agnostic PAC Learnability},\n author = {Rothblum, Guy N and Yona, Gal},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9107--9115},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/rothblum21a/rothblum21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/rothblum21a.html},\n abstract = \t {An agnostic PAC learning algorithm finds a predictor that is competitive with the best predictor in a benchmark hypothesis class, where competitiveness is measured with respect to a given loss function. However, its predictions might be quite sub-optimal for structured subgroups of individuals, such as protected demographic groups. Motivated by such fairness concerns, we study \u201cmulti-group agnostic PAC learnability\u201d: fixing a measure of loss, a benchmark class $\\H$ and a (potentially) rich collection of subgroups $\\G$, the objective is to learn a single predictor such that the loss experienced by every group $g \\in \\G$ is not much larger than the best possible loss for this group within $\\H$. Under natural conditions, we provide a characterization of the loss functions for which such a predictor is guaranteed to exist. For any such loss function we construct a learning algorithm whose sample complexity is logarithmic in the size of the collection $\\G$. 
Our results unify and extend previous positive and negative results from the multi-group fairness literature, which applied for specific loss functions.}\n}", "pdf": "http://proceedings.mlr.press/v139/rothblum21a/rothblum21a.pdf", "supp": "", "pdf_size": 248286, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1736884053321834019&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Weizmann Institute of Science; Weizmann Institute of Science", "aff_domain": "alum.mit.edu;weizmann.ac.il", "email": "alum.mit.edu;weizmann.ac.il", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/rothblum21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Weizmann Institute of Science", "aff_unique_dep": "", "aff_unique_url": "https://www.weizmann.org.il", "aff_unique_abbr": "Weizmann", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "title": "Multi-layered Network Exploration via Random Walks: From Offline Optimization to Online Learning", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8749", "id": "8749", "proceeding": "http://proceedings.mlr.press/v139/liu21ae.html", "slides": "/media/icml-2021/Slides/8749.pdf", "author_site": "Xutong Liu, Jinhang Zuo, Xiaowei Chen, Wei Chen, John C. S. Lui", "author": "Xutong Liu; Jinhang Zuo; Xiaowei Chen; Wei Chen; John C. S. Lui", "abstract": "Multi-layered network exploration (MuLaNE) problem is an important problem abstracted from many applications. In MuLaNE, there are multiple network layers where each node has an importance weight and each layer is explored by a random walk. The MuLaNE task is to allocate total random walk budget $B$ into each network layer so that the total weights of the unique nodes visited by random walks are maximized. We systematically study this problem from offline optimization to online learning. For the offline optimization setting where the network structure and node weights are known, we provide greedy based constant-ratio approximation algorithms for overlapping networks, and greedy or dynamic-programming based optimal solutions for non-overlapping networks. For the online learning setting, neither the network structure nor the node weights are known initially. We adapt the combinatorial multi-armed bandit framework and design algorithms to learn random walk related parameters and node weights while optimizing the budget allocation in multiple rounds, and prove that they achieve logarithmic regret bounds. Finally, we conduct experiments on a real-world social network dataset to validate our theoretical results.", "bibtex": "@InProceedings{pmlr-v139-liu21ae,\n title = \t {Multi-layered Network Exploration via Random Walks: From Offline Optimization to Online Learning},\n author = {Liu, Xutong and Zuo, Jinhang and Chen, Xiaowei and Chen, Wei and Lui, John C. S.},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7057--7066},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21ae/liu21ae.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21ae.html},\n abstract = \t {Multi-layered network exploration (MuLaNE) problem is an important problem abstracted from many applications. 
In MuLaNE, there are multiple network layers where each node has an importance weight and each layer is explored by a random walk. The MuLaNE task is to allocate total random walk budget $B$ into each network layer so that the total weights of the unique nodes visited by random walks are maximized. We systematically study this problem from offline optimization to online learning. For the offline optimization setting where the network structure and node weights are known, we provide greedy based constant-ratio approximation algorithms for overlapping networks, and greedy or dynamic-programming based optimal solutions for non-overlapping networks. For the online learning setting, neither the network structure nor the node weights are known initially. We adapt the combinatorial multi-armed bandit framework and design algorithms to learn random walk related parameters and node weights while optimizing the budget allocation in multiple rounds, and prove that they achieve logarithmic regret bounds. Finally, we conduct experiments on a real-world social network dataset to validate our theoretical results.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21ae/liu21ae.pdf", "supp": "", "pdf_size": 545418, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3646967776212189972&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science and Engineering, The Chinese University of Hong Kong, Hong Kong SAR, China; Department of Electrical and Computer Engineering, Carnegie Mellon University, Pittsburgh, PA, USA; Bytedance, Mountain View, CA, USA; Microsoft Research, Beijing, China; Department of Computer Science and Engineering, The Chinese University of Hong Kong, Hong Kong SAR, China", "aff_domain": "cse.cuhk.edu.hk; ; ;microsoft.com;cse.cuhk.edu.hk", "email": "cse.cuhk.edu.hk; ; ;microsoft.com;cse.cuhk.edu.hk", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/liu21ae.html", "aff_unique_index": "0;1;2;3;0", "aff_unique_norm": "Chinese University of Hong Kong;Carnegie Mellon University;ByteDance;Microsoft", "aff_unique_dep": "Department of Computer Science and Engineering;Department of Electrical and Computer Engineering;;Microsoft Research", "aff_unique_url": "https://www.cuhk.edu.hk;https://www.cmu.edu;https://www.bytedance.com;https://www.microsoft.com/en-us/research/group/microsoft-research-asia", "aff_unique_abbr": "CUHK;CMU;;MSR", "aff_campus_unique_index": "0;1;2;3;0", "aff_campus_unique": "Hong Kong SAR;Pittsburgh;Mountain View;Beijing", "aff_country_unique_index": "0;1;1;0;0", "aff_country_unique": "China;United States" }, { "title": "Multidimensional Scaling: Approximation and Complexity", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9993", "id": "9993", "proceeding": "http://proceedings.mlr.press/v139/demaine21a.html", "slides": "", "author_site": "Erik Demaine, Adam C Hesterberg, Frederic Koehler, Jayson Lynch, John C Urschel", "author": "Erik Demaine; Adam Hesterberg; Frederic Koehler; Jayson Lynch; John Urschel", "abstract": "Metric Multidimensional scaling (MDS) is a classical method for generating meaningful (non-linear) low-dimensional embeddings of high-dimensional data. MDS has a long history in the statistics, machine learning, and graph drawing communities. In particular, the Kamada-Kawai force-directed graph drawing method is equivalent to MDS and is one of the most popular ways in practice to embed graphs into low dimensions. 
Despite its ubiquity, our theoretical understanding of MDS remains limited as its objective function is highly non-convex. In this paper, we prove that minimizing the Kamada-Kawai objective is NP-hard and give a provable approximation algorithm for optimizing it, which in particular is a PTAS on low-diameter graphs. We supplement this result with experiments suggesting possible connections between our greedy approximation algorithm and gradient-based methods.", "bibtex": "@InProceedings{pmlr-v139-demaine21a,\n title = \t {Multidimensional Scaling: Approximation and Complexity},\n author = {Demaine, Erik and Hesterberg, Adam and Koehler, Frederic and Lynch, Jayson and Urschel, John},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2568--2578},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/demaine21a/demaine21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/demaine21a.html},\n abstract = \t {Metric Multidimensional scaling (MDS) is a classical method for generating meaningful (non-linear) low-dimensional embeddings of high-dimensional data. MDS has a long history in the statistics, machine learning, and graph drawing communities. In particular, the Kamada-Kawai force-directed graph drawing method is equivalent to MDS and is one of the most popular ways in practice to embed graphs into low dimensions. Despite its ubiquity, our theoretical understanding of MDS remains limited as its objective function is highly non-convex. In this paper, we prove that minimizing the Kamada-Kawai objective is NP-hard and give a provable approximation algorithm for optimizing it, which in particular is a PTAS on low-diameter graphs. We supplement this result with experiments suggesting possible connections between our greedy approximation algorithm and gradient-based methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/demaine21a/demaine21a.pdf", "supp": "", "pdf_size": 2441444, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=850860243193402473&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Computer Science and Artificial Intelligence Laboratory, MIT, Cambridge, MA, USA; John A. Paulson School of Engineering and Applied Sciences, Harvard University, Cambridge, MA, USA; Department of Mathematics, MIT, Cambridge, MA, USA+Department of Mathematics, MIT, Cambridge, MA, USA; Cheriton School of Computer Science, University of Waterloo, Waterloo, ON, Canada; Department of Mathematics, MIT, Cambridge, MA, USA", "aff_domain": "mit.edu;seas.harvard.edu;mit.edu;uwaterloo.ca;mit.edu", "email": "mit.edu;seas.harvard.edu;mit.edu;uwaterloo.ca;mit.edu", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/demaine21a.html", "aff_unique_index": "0;1;0+0;2;0", "aff_unique_norm": "Massachusetts Institute of Technology;Harvard University;University of Waterloo", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory;John A. 
Paulson School of Engineering and Applied Sciences;Cheriton School of Computer Science", "aff_unique_url": "https://web.mit.edu;https://www.harvard.edu;https://uwaterloo.ca", "aff_unique_abbr": "MIT;Harvard;UW", "aff_campus_unique_index": "0;0;0+0;1;0", "aff_campus_unique": "Cambridge;Waterloo", "aff_country_unique_index": "0;0;0+0;1;0", "aff_country_unique": "United States;Canada" }, { "title": "Multiplicative Noise and Heavy Tails in Stochastic Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9187", "id": "9187", "proceeding": "http://proceedings.mlr.press/v139/hodgkinson21a.html", "slides": "/media/icml-2021/Slides/9187.pdf", "author_site": "Liam Hodgkinson, Michael Mahoney", "author": "Liam Hodgkinson; Michael Mahoney", "abstract": "Although stochastic optimization is central to modern machine learning, the precise mechanisms underlying its success, and in particular, the precise role of the stochasticity, still remain unclear. Modeling stochastic optimization algorithms as discrete random recurrence relations, we show that multiplicative noise, as it commonly arises due to variance in local rates of convergence, results in heavy-tailed stationary behaviour in the parameters. Theoretical results are obtained characterizing this for a large class of (non-linear and even non-convex) models and optimizers (including momentum, Adam, and stochastic Newton), demonstrating that this phenomenon holds generally. We describe dependence on key factors, including step size, batch size, and data variability, all of which exhibit similar qualitative behavior to recent empirical results on state-of-the-art neural network models. Furthermore, we empirically illustrate how multiplicative noise and heavy-tailed structure improve capacity for basin hopping and exploration of non-convex loss surfaces, over commonly-considered stochastic dynamics with only additive noise and light-tailed structure.", "bibtex": "@InProceedings{pmlr-v139-hodgkinson21a,\n title = \t {Multiplicative Noise and Heavy Tails in Stochastic Optimization},\n author = {Hodgkinson, Liam and Mahoney, Michael},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4262--4274},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hodgkinson21a/hodgkinson21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hodgkinson21a.html},\n abstract = \t {Although stochastic optimization is central to modern machine learning, the precise mechanisms underlying its success, and in particular, the precise role of the stochasticity, still remain unclear. Modeling stochastic optimization algorithms as discrete random recurrence relations, we show that multiplicative noise, as it commonly arises due to variance in local rates of convergence, results in heavy-tailed stationary behaviour in the parameters. Theoretical results are obtained characterizing this for a large class of (non-linear and even non-convex) models and optimizers (including momentum, Adam, and stochastic Newton), demonstrating that this phenomenon holds generally. We describe dependence on key factors, including step size, batch size, and data variability, all of which exhibit similar qualitative behavior to recent empirical results on state-of-the-art neural network models. 
Furthermore, we empirically illustrate how multiplicative noise and heavy-tailed structure improve capacity for basin hopping and exploration of non-convex loss surfaces, over commonly-considered stochastic dynamics with only additive noise and light-tailed structure.}\n}", "pdf": "http://proceedings.mlr.press/v139/hodgkinson21a/hodgkinson21a.pdf", "supp": "", "pdf_size": 1462021, "gs_citation": 96, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9181709998758391562&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "ICSI and Department of Statistics, University of California, Berkeley, USA; ICSI and Department of Statistics, University of California, Berkeley, USA", "aff_domain": "berkeley.edu; ", "email": "berkeley.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/hodgkinson21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Department of Statistics", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Multiplying Matrices Without Multiplying", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10633", "id": "10633", "proceeding": "http://proceedings.mlr.press/v139/blalock21a.html", "slides": "", "author_site": "Davis Blalock, John Guttag", "author": "Davis Blalock; John Guttag", "abstract": "Multiplying matrices is among the most fundamental and most computationally demanding operations in machine learning and scientific computing. Consequently, the task of efficiently approximating matrix products has received significant attention. We introduce a learning-based algorithm for this task that greatly outperforms existing methods. Experiments using hundreds of matrices from diverse domains show that it often runs 10x faster than alternatives at a given level of error, as well as 100x faster than exact matrix multiplication. In the common case that one matrix is known ahead of time, our method also has the interesting property that it requires zero multiply-adds. These results suggest that a mixture of hashing, averaging, and byte shuffling{\u2014}the core operations of our method{\u2014}could be a more promising building block for machine learning than the sparsified, factorized, and/or scalar quantized matrix products that have recently been the focus of substantial research and hardware investment.", "bibtex": "@InProceedings{pmlr-v139-blalock21a,\n title = \t {Multiplying Matrices Without Multiplying},\n author = {Blalock, Davis and Guttag, John},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {992--1004},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/blalock21a/blalock21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/blalock21a.html},\n abstract = \t {Multiplying matrices is among the most fundamental and most computationally demanding operations in machine learning and scientific computing. Consequently, the task of efficiently approximating matrix products has received significant attention. We introduce a learning-based algorithm for this task that greatly outperforms existing methods. 
Experiments using hundreds of matrices from diverse domains show that it often runs 10x faster than alternatives at a given level of error, as well as 100x faster than exact matrix multiplication. In the common case that one matrix is known ahead of time, our method also has the interesting property that it requires zero multiply-adds. These results suggest that a mixture of hashing, averaging, and byte shuffling{\u2014}the core operations of our method{\u2014}could be a more promising building block for machine learning than the sparsified, factorized, and/or scalar quantized matrix products that have recently been the focus of substantial research and hardware investment.}\n}", "pdf": "http://proceedings.mlr.press/v139/blalock21a/blalock21a.pdf", "supp": "", "pdf_size": 4524795, "gs_citation": 70, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16672894839769153249&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "MosaicML, San Francisco, CA, USA+MIT CSAIL, Cambridge, MA, USA; MIT CSAIL, Cambridge, MA, USA", "aff_domain": "mosaicml.com; ", "email": "mosaicml.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/blalock21a.html", "aff_unique_index": "0+1;1", "aff_unique_norm": "MosaicML;Massachusetts Institute of Technology", "aff_unique_dep": ";Computer Science and Artificial Intelligence Laboratory", "aff_unique_url": ";https://www.csail.mit.edu", "aff_unique_abbr": ";MIT CSAIL", "aff_campus_unique_index": "0+1;1", "aff_campus_unique": "San Francisco;Cambridge", "aff_country_unique_index": "0+0;0", "aff_country_unique": "United States" }, { "title": "Multiscale Invertible Generative Networks for High-Dimensional Bayesian Inference", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8825", "id": "8825", "proceeding": "http://proceedings.mlr.press/v139/zhang21z.html", "slides": "", "author_site": "Shumao Zhang, Pengchuan Zhang, Thomas Hou", "author": "Shumao Zhang; Pengchuan Zhang; Thomas Y Hou", "abstract": "We propose a Multiscale Invertible Generative Network (MsIGN) and associated training algorithm that leverages multiscale structure to solve high-dimensional Bayesian inference. To address the curse of dimensionality, MsIGN exploits the low-dimensional nature of the posterior, and generates samples from coarse to fine scale (low to high dimension) by iteratively upsampling and refining samples. MsIGN is trained in a multi-stage manner to minimize the Jeffreys divergence, which avoids mode dropping in high-dimensional cases. On two high-dimensional Bayesian inverse problems, we show superior performance of MsIGN over previous approaches in posterior approximation and multiple mode capture. 
On the natural image synthesis task, MsIGN achieves superior performance in bits-per-dimension over baseline models and yields great interpretability of its neurons in intermediate layers.", "bibtex": "@InProceedings{pmlr-v139-zhang21z,\n title = \t {Multiscale Invertible Generative Networks for High-Dimensional Bayesian Inference},\n author = {Zhang, Shumao and Zhang, Pengchuan and Hou, Thomas Y},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12632--12641},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21z/zhang21z.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21z.html},\n abstract = \t {We propose a Multiscale Invertible Generative Network (MsIGN) and associated training algorithm that leverages multiscale structure to solve high-dimensional Bayesian inference. To address the curse of dimensionality, MsIGN exploits the low-dimensional nature of the posterior, and generates samples from coarse to fine scale (low to high dimension) by iteratively upsampling and refining samples. MsIGN is trained in a multi-stage manner to minimize the Jeffreys divergence, which avoids mode dropping in high-dimensional cases. On two high-dimensional Bayesian inverse problems, we show superior performance of MsIGN over previous approaches in posterior approximation and multiple mode capture. On the natural image synthesis task, MsIGN achieves superior performance in bits-per-dimension over baseline models and yields great interpretability of its neurons in intermediate layers.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21z/zhang21z.pdf", "supp": "", "pdf_size": 6469908, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18275556431326193234&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Computational & Mathematical Sciences, Caltech, Pasadena, California, USA; MSR AI Lab, Redmond, Washington, USA; Department of Computational & Mathematical Sciences, Caltech, Pasadena, California, USA", "aff_domain": "caltech.edu; ; ", "email": "caltech.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zhang21z.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "California Institute of Technology;Microsoft", "aff_unique_dep": "Department of Computational & Mathematical Sciences;AI Lab", "aff_unique_url": "https://www.caltech.edu;https://www.microsoft.com/en-us/research/labs/microsoft-research-lab-redmond", "aff_unique_abbr": "Caltech;MSR", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Pasadena;Redmond", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Narrow Margins: Classification, Margins and Fat Tails", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8803", "id": "8803", "proceeding": "http://proceedings.mlr.press/v139/buet-golfouse21a.html", "slides": "", "author": "Francois Buet-Golfouse", "abstract": "It is well-known that, for separable data, the regularised two-class logistic regression or support vector machine re-normalised estimate converges to the maximal margin classifier as the regularisation hyper-parameter $\\lambda$ goes to 0.
The fact that different loss functions may lead to the same solution is of theoretical and practical relevance as margin maximisation allows more straightforward considerations in terms of generalisation and geometric interpretation. We investigate the case where this convergence property is not guaranteed to hold and show that it can be fully characterised by the distribution of error terms in the latent variable interpretation of linear classifiers. In particular, if errors follow a regularly varying distribution, then the regularised and re-normalised estimate does not converge to the maximal margin classifier. This shows that classification with fat tails has a qualitatively different behaviour, which should be taken into account when considering real-life data.", "bibtex": "@InProceedings{pmlr-v139-buet-golfouse21a,\n title = \t {Narrow Margins: Classification, Margins and Fat Tails},\n author = {Buet-Golfouse, Francois},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1127--1135},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/buet-golfouse21a/buet-golfouse21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/buet-golfouse21a.html},\n abstract = \t {It is well-known that, for separable data, the regularised two-class logistic regression or support vector machine re-normalised estimate converges to the maximal margin classifier as the regularisation hyper-parameter $\\lambda$ goes to 0. The fact that different loss functions may lead to the same solution is of theoretical and practical relevance as margin maximisation allows more straightforward considerations in terms of generalisation and geometric interpretation. We investigate the case where this convergence property is not guaranteed to hold and show that it can be fully characterised by the distribution of error terms in the latent variable interpretation of linear classifiers. In particular, if errors follow a regularly varying distribution, then the regularised and re-normalised estimate does not converge to the maximal margin classifier. 
This shows that classification with fat tails has a qualitatively different behaviour, which should be taken into account when considering real-life data.}\n}", "pdf": "http://proceedings.mlr.press/v139/buet-golfouse21a/buet-golfouse21a.pdf", "supp": "", "pdf_size": 366720, "gs_citation": 2, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17595150164449765637&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 3, "aff": "Department of Mathematics, University College London, London, United Kingdom", "aff_domain": "ucl.ac.uk", "email": "ucl.ac.uk", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v139/buet-golfouse21a.html", "aff_unique_index": "0", "aff_unique_norm": "University College London", "aff_unique_dep": "Department of Mathematics", "aff_unique_url": "https://www.ucl.ac.uk", "aff_unique_abbr": "UCL", "aff_campus_unique_index": "0", "aff_campus_unique": "London", "aff_country_unique_index": "0", "aff_country_unique": "United Kingdom" }, { "title": "Navigation Turing Test (NTT): Learning to Evaluate Human-Like Navigation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10031", "id": "10031", "proceeding": "http://proceedings.mlr.press/v139/devlin21a.html", "slides": "", "author_site": "Sam Devlin, Raluca Georgescu, Ida Momennejad, Jaroslaw Rzepecki, Evelyn Zuniga, Gavin Costello, Guy Leroy, Ali Shaw, Katja Hofmann", "author": "Sam Devlin; Raluca Georgescu; Ida Momennejad; Jaroslaw Rzepecki; Evelyn Zuniga; Gavin Costello; Guy Leroy; Ali Shaw; Katja Hofmann", "abstract": "A key challenge on the path to developing agents that learn complex human-like behavior is the need to quickly and accurately quantify human-likeness. While human assessments of such behavior can be highly accurate, speed and scalability are limited. We address these limitations through a novel automated Navigation Turing Test (ANTT) that learns to predict human judgments of human-likeness. We demonstrate the effectiveness of our automated NTT on a navigation task in a complex 3D environment. We investigate six classification models to shed light on the types of architectures best suited to this task, and validate them against data collected through a human NTT. Our best models achieve high accuracy when distinguishing true human and agent behavior. At the same time, we show that predicting finer-grained human assessment of agents\u2019 progress towards human-like behavior remains unsolved. Our work takes an important step towards agents that more effectively learn complex human-like behavior.", "bibtex": "@InProceedings{pmlr-v139-devlin21a,\n title = \t {Navigation Turing Test (NTT): Learning to Evaluate Human-Like Navigation},\n author = {Devlin, Sam and Georgescu, Raluca and Momennejad, Ida and Rzepecki, Jaroslaw and Zuniga, Evelyn and Costello, Gavin and Leroy, Guy and Shaw, Ali and Hofmann, Katja},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2644--2653},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/devlin21a/devlin21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/devlin21a.html},\n abstract = \t {A key challenge on the path to developing agents that learn complex human-like behavior is the need to quickly and accurately quantify human-likeness. 
While human assessments of such behavior can be highly accurate, speed and scalability are limited. We address these limitations through a novel automated Navigation Turing Test (ANTT) that learns to predict human judgments of human-likeness. We demonstrate the effectiveness of our automated NTT on a navigation task in a complex 3D environment. We investigate six classification models to shed light on the types of architectures best suited to this task, and validate them against data collected through a human NTT. Our best models achieve high accuracy when distinguishing true human and agent behavior. At the same time, we show that predicting finer-grained human assessment of agents\u2019 progress towards human-like behavior remains unsolved. Our work takes an important step towards agents that more effectively learn complex human-like behavior.}\n}", "pdf": "http://proceedings.mlr.press/v139/devlin21a/devlin21a.pdf", "supp": "", "pdf_size": 2915190, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1633562910551633122&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Microsoft Research, Cambridge, UK; Microsoft Research, Cambridge, UK; Microsoft Research, Cambridge, UK; Microsoft Research, New York, NY, USA; Microsoft Research, Cambridge, UK; Ninja Theory, Cambridge, UK; Ninja Theory, Cambridge, UK; Ninja Theory, Cambridge, UK; Microsoft Research, Cambridge, UK", "aff_domain": "microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com; ; ; ;microsoft.com", "email": "microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com; ; ; ;microsoft.com", "github": "", "project": "", "author_num": 9, "oa": "https://proceedings.mlr.press/v139/devlin21a.html", "aff_unique_index": "0;0;0;0;0;1;1;1;0", "aff_unique_norm": "Microsoft;Ninja Theory", "aff_unique_dep": "Microsoft Research;", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://www.ninjatheory.com", "aff_unique_abbr": "MSR;", "aff_campus_unique_index": "0;0;0;1;0;0;0;0;0", "aff_campus_unique": "Cambridge;New York", "aff_country_unique_index": "0;0;0;1;0;0;0;0;0", "aff_country_unique": "United Kingdom;United States" }, { "title": "NeRF-VAE: A Geometry Aware 3D Scene Generative Model", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9067", "id": "9067", "proceeding": "http://proceedings.mlr.press/v139/kosiorek21a.html", "slides": "", "author_site": "Adam Kosiorek, Heiko Strathmann, Daniel Zoran, Pol Moreno, Rosalia Schneider, Sona Mokra, Danilo J. Rezende", "author": "Adam R Kosiorek; Heiko Strathmann; Daniel Zoran; Pol Moreno; Rosalia Schneider; Sona Mokra; Danilo Jimenez Rezende", "abstract": "We propose NeRF-VAE, a 3D scene generative model that incorporates geometric structure via Neural Radiance Fields (NeRF) and differentiable volume rendering. In contrast to NeRF, our model takes into account shared structure across scenes, and is able to infer the structure of a novel scene\u2014without the need to re-train\u2014using amortized inference. NeRF-VAE\u2019s explicit 3D rendering process further contrasts previous generative models with convolution-based rendering which lacks geometric structure. Our model is a VAE that learns a distribution over radiance fields by conditioning them on a latent scene representation. We show that, once trained, NeRF-VAE is able to infer and render geometrically-consistent scenes from previously unseen 3D environments of synthetic scenes using very few input images. 
We further demonstrate that NeRF-VAE generalizes well to out-of-distribution cameras, while convolutional models do not. Finally, we introduce and study an attention-based conditioning mechanism of NeRF-VAE\u2019s decoder, which improves model performance.", "bibtex": "@InProceedings{pmlr-v139-kosiorek21a,\n title = \t {NeRF-VAE: A Geometry Aware 3D Scene Generative Model},\n author = {Kosiorek, Adam R and Strathmann, Heiko and Zoran, Daniel and Moreno, Pol and Schneider, Rosalia and Mokra, Sona and Rezende, Danilo Jimenez},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5742--5752},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kosiorek21a/kosiorek21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kosiorek21a.html},\n abstract = \t {We propose NeRF-VAE, a 3D scene generative model that incorporates geometric structure via Neural Radiance Fields (NeRF) and differentiable volume rendering. In contrast to NeRF, our model takes into account shared structure across scenes, and is able to infer the structure of a novel scene\u2014without the need to re-train\u2014using amortized inference. NeRF-VAE\u2019s explicit 3D rendering process further contrasts previous generative models with convolution-based rendering which lacks geometric structure. Our model is a VAE that learns a distribution over radiance fields by conditioning them on a latent scene representation. We show that, once trained, NeRF-VAE is able to infer and render geometrically-consistent scenes from previously unseen 3D environments of synthetic scenes using very few input images. We further demonstrate that NeRF-VAE generalizes well to out-of-distribution cameras, while convolutional models do not. 
Finally, we introduce and study an attention-based conditioning mechanism of NeRF-VAE\u2019s decoder, which improves model performance.}\n}", "pdf": "http://proceedings.mlr.press/v139/kosiorek21a/kosiorek21a.pdf", "supp": "", "pdf_size": 3187677, "gs_citation": 153, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3601848012644504530&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "DeepMind, London; DeepMind, London; DeepMind, London; DeepMind, London; DeepMind, London; DeepMind, London; DeepMind, London", "aff_domain": "deepmind.com;deepmind.com; ; ; ; ; ", "email": "deepmind.com;deepmind.com; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/kosiorek21a.html", "aff_unique_index": "0;0;0;0;0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "0;0;0;0;0;0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Near Optimal Reward-Free Reinforcement Learning", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8597", "id": "8597", "proceeding": "http://proceedings.mlr.press/v139/zhang21e.html", "slides": "/media/icml-2021/Slides/8597.pdf", "author_site": "Zhang Zihan, Simon Du, Xiangyang Ji", "author": "Zihan Zhang; Simon Du; Xiangyang Ji", "abstract": "We study the reward-free reinforcement learning framework, which is particularly suitable for batch reinforcement learning and scenarios where one needs policies for multiple reward functions. This framework has two phases: in the exploration phase, the agent collects trajectories by interacting with the environment without using any reward signal; in the planning phase, the agent needs to return a near-optimal policy for arbitrary reward functions. We give a new efficient algorithm, \textbf{S}taged \textbf{S}ampling + \textbf{T}runcated \textbf{P}lanning (\algoname), which interacts with the environment at most $O\left( \frac{S^2A}{\epsilon^2}\poly\log\left(\frac{SAH}{\epsilon}\right) \right)$ episodes in the exploration phase, and guarantees to output a near-optimal policy for arbitrary reward functions in the planning phase, where $S$ is the size of state space, $A$ is the size of action space, $H$ is the planning horizon, and $\epsilon$ is the target accuracy relative to the total reward. Notably, our sample complexity scales only \emph{logarithmically} with $H$, in contrast to all existing results which scale \emph{polynomially} with $H$. Furthermore, this bound matches the minimax lower bound $\Omega\left(\frac{S^2A}{\epsilon^2}\right)$ up to logarithmic factors. 
Our results rely on three new techniques: 1) A new sufficient condition for the dataset to plan for an $\epsilon$-suboptimal policy; 2) A new way to plan efficiently under the proposed condition using soft-truncated planning; 3) Constructing extended MDP to maximize the truncated accumulative rewards efficiently.", "bibtex": "@InProceedings{pmlr-v139-zhang21e,\n title = \t {Near Optimal Reward-Free Reinforcement Learning},\n author = {Zhang, Zihan and Du, Simon and Ji, Xiangyang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12402--12412},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21e/zhang21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21e.html},\n abstract = \t {We study the reward-free reinforcement learning framework, which is particularly suitable for batch reinforcement learning and scenarios where one needs policies for multiple reward functions. This framework has two phases: in the exploration phase, the agent collects trajectories by interacting with the environment without using any reward signal; in the planning phase, the agent needs to return a near-optimal policy for arbitrary reward functions. We give a new efficient algorithm, \textbf{S}taged \textbf{S}ampling + \textbf{T}runcated \textbf{P}lanning (\algoname), which interacts with the environment at most $O\left( \frac{S^2A}{\epsilon^2}\poly\log\left(\frac{SAH}{\epsilon}\right) \right)$ episodes in the exploration phase, and guarantees to output a near-optimal policy for arbitrary reward functions in the planning phase, where $S$ is the size of state space, $A$ is the size of action space, $H$ is the planning horizon, and $\epsilon$ is the target accuracy relative to the total reward. Notably, our sample complexity scales only \emph{logarithmically} with $H$, in contrast to all existing results which scale \emph{polynomially} with $H$. Furthermore, this bound matches the minimax lower bound $\Omega\left(\frac{S^2A}{\epsilon^2}\right)$ up to logarithmic factors. 
Our results rely on three new techniques: 1) A new sufficient condition for the dataset to plan for an $\epsilon$-suboptimal policy; 2) A new way to plan efficiently under the proposed condition using soft-truncated planning; 3) Constructing extended MDP to maximize the truncated accumulative rewards efficiently.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21e/zhang21e.pdf", "supp": "", "pdf_size": 729359, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1532708248628184441&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "Tsinghua University; University of Washington; Tsinghua University", "aff_domain": "mails.tsinghua.edu.cn;cs.washington.edu;tsinghua.edu.cn", "email": "mails.tsinghua.edu.cn;cs.washington.edu;tsinghua.edu.cn", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zhang21e.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Tsinghua University;University of Washington", "aff_unique_dep": ";", "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.washington.edu", "aff_unique_abbr": "THU;UW", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0", "aff_country_unique": "China;United States" }, { "title": "Near-Optimal Algorithms for Explainable k-Medians and k-Means", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9189", "id": "9189", "proceeding": "http://proceedings.mlr.press/v139/makarychev21a.html", "slides": "/media/icml-2021/Slides/9189.pdf", "author_site": "Konstantin Makarychev, Liren Shan", "author": "Konstantin Makarychev; Liren Shan", "abstract": "We consider the problem of explainable $k$-medians and $k$-means introduced by Dasgupta, Frost, Moshkovitz, and Rashtchian\u00a0(ICML 2020). In this problem, our goal is to find a \emph{threshold decision tree} that partitions data into $k$ clusters and minimizes the $k$-medians or $k$-means objective. The obtained clustering is easy to interpret because every decision node of a threshold tree splits data based on a single feature into two groups. We propose a new algorithm for this problem which is $\tilde O(\log k)$ competitive with $k$-medians with $\ell_1$ norm and $\tilde O(k)$ competitive with $k$-means. This is an improvement over the previous guarantees of $O(k)$ and $O(k^2)$ by Dasgupta et al (2020). We also provide a new algorithm which is $O(\log^{\nicefrac{3}{2}} k)$ competitive for $k$-medians with $\ell_2$ norm. Our first algorithm is near-optimal: Dasgupta et al (2020) showed a lower bound of $\Omega(\log k)$ for $k$-medians; in this work, we prove a lower bound of $\tilde\Omega(k)$ for $k$-means.
We also provide a lower bound of $\\Omega(\\log k)$ for $k$-medians with $\\ell_2$ norm.", "bibtex": "@InProceedings{pmlr-v139-makarychev21a,\n title = \t {Near-Optimal Algorithms for Explainable k-Medians and k-Means},\n author = {Makarychev, Konstantin and Shan, Liren},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7358--7367},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/makarychev21a/makarychev21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/makarychev21a.html},\n abstract = \t {We consider the problem of explainable $k$-medians and $k$-means introduced by Dasgupta, Frost, Moshkovitz, and Rashtchian\u00a0(ICML 2020). In this problem, our goal is to find a \\emph{threshold decision tree} that partitions data into $k$ clusters and minimizes the $k$-medians or $k$-means objective. The obtained clustering is easy to interpret because every decision node of a threshold tree splits data based on a single feature into two groups. We propose a new algorithm for this problem which is $\\tilde O(\\log k)$ competitive with $k$-medians with $\\ell_1$ norm and $\\tilde O(k)$ competitive with $k$-means. This is an improvement over the previous guarantees of $O(k)$ and $O(k^2)$ by Dasgupta et al (2020). We also provide a new algorithm which is $O(\\log^{\\nicefrac{3}{2}} k)$ competitive for $k$-medians with $\\ell_2$ norm. Our first algorithm is near-optimal: Dasgupta et al (2020) showed a lower bound of $\\Omega(\\log k)$ for $k$-medians; in this work, we prove a lower bound of $\\tilde\\Omega(k)$ for $k$-means. We also provide a lower bound of $\\Omega(\\log k)$ for $k$-medians with $\\ell_2$ norm.}\n}", "pdf": "http://proceedings.mlr.press/v139/makarychev21a/makarychev21a.pdf", "supp": "", "pdf_size": 325271, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10000619393563688696&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Northwestern University; Northwestern University", "aff_domain": "northwestern.edu;u.northwestern.edu", "email": "northwestern.edu;u.northwestern.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/makarychev21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Northwestern University", "aff_unique_dep": "", "aff_unique_url": "https://www.northwestern.edu", "aff_unique_abbr": "NU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Near-Optimal Confidence Sequences for Bounded Random Variables", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10463", "id": "10463", "proceeding": "http://proceedings.mlr.press/v139/kuchibhotla21a.html", "slides": "", "author_site": "Arun Kuchibhotla, Qinqing Zheng", "author": "Arun K Kuchibhotla; Qinqing Zheng", "abstract": "Many inference problems, such as sequential decision problems like A/B testing, adaptive sampling schemes like bandit selection, are often online in nature. The fundamental problem for online inference is to provide a sequence of confidence intervals that are valid uniformly over the growing-into-infinity sample sizes. 
To address this question, we provide a near-optimal confidence sequence for bounded random variables by utilizing Bentkus\u2019 concentration results. We show that it improves on the existing approaches that use the Cram{\u00e9}r-Chernoff technique such as the Hoeffding, Bernstein, and Bennett inequalities. The resulting confidence sequence is confirmed to be favorable in synthetic coverage problems, adaptive stopping algorithms, and multi-armed bandit problems.", "bibtex": "@InProceedings{pmlr-v139-kuchibhotla21a,\n title = \t {Near-Optimal Confidence Sequences for Bounded Random Variables},\n author = {Kuchibhotla, Arun K and Zheng, Qinqing},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5827--5837},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kuchibhotla21a/kuchibhotla21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kuchibhotla21a.html},\n abstract = \t {Many inference problems, such as sequential decision problems like A/B testing, adaptive sampling schemes like bandit selection, are often online in nature. The fundamental problem for online inference is to provide a sequence of confidence intervals that are valid uniformly over the growing-into-infinity sample sizes. To address this question, we provide a near-optimal confidence sequence for bounded random variables by utilizing Bentkus\u2019 concentration results. We show that it improves on the existing approaches that use the Cram{\u00e9}r-Chernoff technique such as the Hoeffding, Bernstein, and Bennett inequalities. The resulting confidence sequence is confirmed to be favorable in synthetic coverage problems, adaptive stopping algorithms, and multi-armed bandit problems.}\n}", "pdf": "http://proceedings.mlr.press/v139/kuchibhotla21a/kuchibhotla21a.pdf", "supp": "", "pdf_size": 9537009, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1224018117329927923&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of Statistics and Data Science, Carnegie Mellon University; Facebook AI Research", "aff_domain": "stat.cmu.edu;gmail.com", "email": "stat.cmu.edu;gmail.com", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/kuchibhotla21a.html", "aff_unique_index": "0;1", "aff_unique_norm": "Carnegie Mellon University;Meta", "aff_unique_dep": "Department of Statistics and Data Science;Facebook AI Research", "aff_unique_url": "https://www.cmu.edu;https://research.facebook.com", "aff_unique_abbr": "CMU;FAIR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Near-Optimal Entrywise Anomaly Detection for Low-Rank Matrices with Sub-Exponential Noise", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8813", "id": "8813", "proceeding": "http://proceedings.mlr.press/v139/farias21a.html", "slides": "", "author_site": "Vivek Farias, Andrew Li, Tianyi Peng", "author": "Vivek Farias; Andrew A Li; Tianyi Peng", "abstract": "We study the problem of identifying anomalies in a low-rank matrix observed with sub-exponential noise, motivated by applications in retail and inventory management. 
State of the art approaches to anomaly detection in low-rank matrices apparently fall short, since they require that non-anomalous entries be observed with vanishingly small noise (which is not the case in our problem, and indeed in many applications). So motivated, we propose a conceptually simple entrywise approach to anomaly detection in low-rank matrices. Our approach accommodates a general class of probabilistic anomaly models. We extend recent work on entrywise error guarantees for matrix completion, establishing such guarantees for sub-exponential matrices, where in addition to missing entries, a fraction of entries are corrupted by (an also unknown) anomaly model. Viewing the anomaly detection as a classification task, to the best of our knowledge, we are the first to achieve the min-max optimal detection rate (up to log factors). Using data from a massive consumer goods retailer, we show that our approach provides significant improvements over incumbent approaches to anomaly detection.", "bibtex": "@InProceedings{pmlr-v139-farias21a,\n title = \t {Near-Optimal Entrywise Anomaly Detection for Low-Rank Matrices with Sub-Exponential Noise},\n author = {Farias, Vivek and Li, Andrew A and Peng, Tianyi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3154--3163},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/farias21a/farias21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/farias21a.html},\n abstract = \t {We study the problem of identifying anomalies in a low-rank matrix observed with sub-exponential noise, motivated by applications in retail and inventory management. State of the art approaches to anomaly detection in low-rank matrices apparently fall short, since they require that non-anomalous entries be observed with vanishingly small noise (which is not the case in our problem, and indeed in many applications). So motivated, we propose a conceptually simple entrywise approach to anomaly detection in low-rank matrices. Our approach accommodates a general class of probabilistic anomaly models. We extend recent work on entrywise error guarantees for matrix completion, establishing such guarantees for sub-exponential matrices, where in addition to missing entries, a fraction of entries are corrupted by (an also unknown) anomaly model. Viewing the anomaly detection as a classification task, to the best of our knowledge, we are the first to achieve the min-max optimal detection rate (up to log factors). 
Using data from a massive consumer goods retailer, we show that our approach provides significant improvements over incumbent approaches to anomaly detection.}\n}", "pdf": "http://proceedings.mlr.press/v139/farias21a/farias21a.pdf", "supp": "", "pdf_size": 5818970, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6691371529677387016&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "Sloan School of Management, Massachusetts Institute of Technology, Cambridge, MA 02139, USA; Tepper School of Business, Carnegie Mellon University, Pittsburgh, PA 15213, USA; Department of Aeronautics and Astronautics, Massachusetts Institute of Technology, Cambridge, MA 02139, USA", "aff_domain": "mit.edu; ; ", "email": "mit.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/farias21a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Massachusetts Institute of Technology;Carnegie Mellon University", "aff_unique_dep": "Sloan School of Management;Tepper School of Business", "aff_unique_url": "https://mitsloan.mit.edu/;https://www.cmu.edu", "aff_unique_abbr": "MIT;CMU", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Cambridge;Pittsburgh", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Near-Optimal Linear Regression under Distribution Shift", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8821", "id": "8821", "proceeding": "http://proceedings.mlr.press/v139/lei21a.html", "slides": "/media/icml-2021/Slides/8821.pdf", "author_site": "Qi Lei, Wei Hu, Jason Lee", "author": "Qi Lei; Wei Hu; Jason Lee", "abstract": "Transfer learning is essential when sufficient data comes from the source domain, with scarce labeled data from the target domain. We develop estimators that achieve minimax linear risk for linear regression problems under distribution shift. Our algorithms cover different transfer learning settings including covariate shift and model shift. We also consider when data are generated from either linear or general nonlinear models. We show that linear minimax estimators are within an absolute constant of the minimax risk even among nonlinear estimators for various source/target distributions.", "bibtex": "@InProceedings{pmlr-v139-lei21a,\n title = \t {Near-Optimal Linear Regression under Distribution Shift},\n author = {Lei, Qi and Hu, Wei and Lee, Jason},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6164--6174},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lei21a/lei21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lei21a.html},\n abstract = \t {Transfer learning is essential when sufficient data comes from the source domain, with scarce labeled data from the target domain. We develop estimators that achieve minimax linear risk for linear regression problems under distribution shift. Our algorithms cover different transfer learning settings including covariate shift and model shift. We also consider when data are generated from either linear or general nonlinear models. 
We show that linear minimax estimators are within an absolute constant of the minimax risk even among nonlinear estimators for various source/target distributions.}\n}", "pdf": "http://proceedings.mlr.press/v139/lei21a/lei21a.pdf", "supp": "", "pdf_size": 1325822, "gs_citation": 54, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10658000125157298444&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Princeton University; Princeton University; Princeton University", "aff_domain": "princeton.edu;cs.princeton.edu;princeton.edu", "email": "princeton.edu;cs.princeton.edu;princeton.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/lei21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Princeton University", "aff_unique_dep": "", "aff_unique_url": "https://www.princeton.edu", "aff_unique_abbr": "Princeton", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Near-Optimal Model-Free Reinforcement Learning in Non-Stationary Episodic MDPs", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8427", "id": "8427", "proceeding": "http://proceedings.mlr.press/v139/mao21b.html", "slides": "", "author_site": "Weichao Mao, Kaiqing Zhang, Ruihao Zhu, David Simchi-Levi, Tamer Basar", "author": "Weichao Mao; Kaiqing Zhang; Ruihao Zhu; David Simchi-Levi; Tamer Basar", "abstract": "We consider model-free reinforcement learning (RL) in non-stationary Markov decision processes. Both the reward functions and the state transition functions are allowed to vary arbitrarily over time as long as their cumulative variations do not exceed certain variation budgets. We propose Restarted Q-Learning with Upper Confidence Bounds (RestartQ-UCB), the first model-free algorithm for non-stationary RL, and show that it outperforms existing solutions in terms of dynamic regret. Specifically, RestartQ-UCB with Freedman-type bonus terms achieves a dynamic regret bound of $\\widetilde{O}(S^{\\frac{1}{3}} A^{\\frac{1}{3}} \\Delta^{\\frac{1}{3}} H T^{\\frac{2}{3}})$, where $S$ and $A$ are the numbers of states and actions, respectively, $\\Delta>0$ is the variation budget, $H$ is the number of time steps per episode, and $T$ is the total number of time steps. We further show that our algorithm is \\emph{nearly optimal} by establishing an information-theoretical lower bound of $\\Omega(S^{\\frac{1}{3}} A^{\\frac{1}{3}} \\Delta^{\\frac{1}{3}} H^{\\frac{2}{3}} T^{\\frac{2}{3}})$, the first lower bound in non-stationary RL. Numerical experiments validate the advantages of RestartQ-UCB in terms of both cumulative rewards and computational efficiency. 
We further demonstrate the power of our results in the context of multi-agent RL, where non-stationarity is a key challenge.", "bibtex": "@InProceedings{pmlr-v139-mao21b,\n title = \t {Near-Optimal Model-Free Reinforcement Learning in Non-Stationary Episodic MDPs},\n author = {Mao, Weichao and Zhang, Kaiqing and Zhu, Ruihao and Simchi-Levi, David and Basar, Tamer},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7447--7458},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/mao21b/mao21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/mao21b.html},\n abstract = \t {We consider model-free reinforcement learning (RL) in non-stationary Markov decision processes. Both the reward functions and the state transition functions are allowed to vary arbitrarily over time as long as their cumulative variations do not exceed certain variation budgets. We propose Restarted Q-Learning with Upper Confidence Bounds (RestartQ-UCB), the first model-free algorithm for non-stationary RL, and show that it outperforms existing solutions in terms of dynamic regret. Specifically, RestartQ-UCB with Freedman-type bonus terms achieves a dynamic regret bound of $\\widetilde{O}(S^{\\frac{1}{3}} A^{\\frac{1}{3}} \\Delta^{\\frac{1}{3}} H T^{\\frac{2}{3}})$, where $S$ and $A$ are the numbers of states and actions, respectively, $\\Delta>0$ is the variation budget, $H$ is the number of time steps per episode, and $T$ is the total number of time steps. We further show that our algorithm is \\emph{nearly optimal} by establishing an information-theoretical lower bound of $\\Omega(S^{\\frac{1}{3}} A^{\\frac{1}{3}} \\Delta^{\\frac{1}{3}} H^{\\frac{2}{3}} T^{\\frac{2}{3}})$, the first lower bound in non-stationary RL. Numerical experiments validate the advantages of RestartQ-UCB in terms of both cumulative rewards and computational efficiency. 
We further demonstrate the power of our results in the context of multi-agent RL, where non-stationarity is a key challenge.}\n}", "pdf": "http://proceedings.mlr.press/v139/mao21b/mao21b.pdf", "supp": "", "pdf_size": 1484015, "gs_citation": 49, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3958047251466597299&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Electrical and Computer Engineering & Coordinated Science Laboratory, University of Illinois Urbana-Champaign, Urbana, IL, USA+Institute for Data, Systems, and Society, Massachusetts Institute of Technology, Cambridge, MA, USA; Department of Electrical and Computer Engineering & Coordinated Science Laboratory, University of Illinois Urbana-Champaign, Urbana, IL, USA+Institute for Data, Systems, and Society, Massachusetts Institute of Technology, Cambridge, MA, USA; Institute for Data, Systems, and Society, Massachusetts Institute of Technology, Cambridge, MA, USA; Institute for Data, Systems, and Society, Massachusetts Institute of Technology, Cambridge, MA, USA; Department of Electrical and Computer Engineering & Coordinated Science Laboratory, University of Illinois Urbana-Champaign, Urbana, IL, USA+Institute for Data, Systems, and Society, Massachusetts Institute of Technology, Cambridge, MA, USA", "aff_domain": "illinois.edu;illinois.edu;mit.edu;mit.edu;illinois.edu", "email": "illinois.edu;illinois.edu;mit.edu;mit.edu;illinois.edu", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/mao21b.html", "aff_unique_index": "0+1;0+1;1;1;0+1", "aff_unique_norm": "University of Illinois Urbana-Champaign;Massachusetts Institute of Technology", "aff_unique_dep": "Department of Electrical and Computer Engineering;Institute for Data, Systems, and Society", "aff_unique_url": "https://illinois.edu;https://web.mit.edu", "aff_unique_abbr": "UIUC;MIT", "aff_campus_unique_index": "0+1;0+1;1;1;0+1", "aff_campus_unique": "Urbana;Cambridge", "aff_country_unique_index": "0+0;0+0;0;0;0+0", "aff_country_unique": "United States" }, { "title": "Near-Optimal Representation Learning for Linear Bandits and Linear RL", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8469", "id": "8469", "proceeding": "http://proceedings.mlr.press/v139/hu21a.html", "slides": "/media/icml-2021/Slides/8469.pdf", "author_site": "Jiachen Hu, Xiaoyu Chen, Chi Jin, Lihong Li, Liwei Wang", "author": "Jiachen Hu; Xiaoyu Chen; Chi Jin; Lihong Li; Liwei Wang", "abstract": "This paper studies representation learning for multi-task linear bandits and multi-task episodic RL with linear value function approximation. We first consider the setting where we play $M$ linear bandits with dimension $d$ concurrently, and these bandits share a common $k$-dimensional linear representation so that $k\\ll d$ and $k \\ll M$. We propose a sample-efficient algorithm, MTLR-OFUL, which leverages the shared representation to achieve $\\tilde{O}(M\\sqrt{dkT} + d\\sqrt{kMT} )$ regret, with $T$ being the number of total steps. Our regret significantly improves upon the baseline $\\tilde{O}(Md\\sqrt{T})$ achieved by solving each task independently. We further develop a lower bound that shows our regret is near-optimal when $d > M$. Furthermore, we extend the algorithm and analysis to multi-task episodic RL with linear value function approximation under low inherent Bellman error (Zanette et al., 2020a). 
To the best of our knowledge, this is the first theoretical result that characterize the benefits of multi-task representation learning for exploration in RL with function approximation.", "bibtex": "@InProceedings{pmlr-v139-hu21a,\n title = \t {Near-Optimal Representation Learning for Linear Bandits and Linear RL},\n author = {Hu, Jiachen and Chen, Xiaoyu and Jin, Chi and Li, Lihong and Wang, Liwei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4349--4358},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hu21a/hu21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hu21a.html},\n abstract = \t {This paper studies representation learning for multi-task linear bandits and multi-task episodic RL with linear value function approximation. We first consider the setting where we play $M$ linear bandits with dimension $d$ concurrently, and these bandits share a common $k$-dimensional linear representation so that $k\\ll d$ and $k \\ll M$. We propose a sample-efficient algorithm, MTLR-OFUL, which leverages the shared representation to achieve $\\tilde{O}(M\\sqrt{dkT} + d\\sqrt{kMT} )$ regret, with $T$ being the number of total steps. Our regret significantly improves upon the baseline $\\tilde{O}(Md\\sqrt{T})$ achieved by solving each task independently. We further develop a lower bound that shows our regret is near-optimal when $d > M$. Furthermore, we extend the algorithm and analysis to multi-task episodic RL with linear value function approximation under low inherent Bellman error (Zanette et al., 2020a). 
To the best of our knowledge, this is the first theoretical result that characterize the benefits of multi-task representation learning for exploration in RL with function approximation.}\n}", "pdf": "http://proceedings.mlr.press/v139/hu21a/hu21a.pdf", "supp": "", "pdf_size": 401740, "gs_citation": 62, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16366571951397061558&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Key Laboratory of Machine Perception, MOE, School of EECS, Peking University; Key Laboratory of Machine Perception, MOE, School of EECS, Peking University; Department of Electrical and Computer Engineering, Princeton University; Amazon; Center for Data Science, Peking University", "aff_domain": "pku.edu.cn;pku.edu.cn;princeton.edu;amazon.com;cis.pku.edu.cn", "email": "pku.edu.cn;pku.edu.cn;princeton.edu;amazon.com;cis.pku.edu.cn", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/hu21a.html", "aff_unique_index": "0;0;1;2;0", "aff_unique_norm": "Peking University;Princeton University;Amazon", "aff_unique_dep": "School of EECS;Department of Electrical and Computer Engineering;Amazon.com, Inc.", "aff_unique_url": "http://www.pku.edu.cn;https://www.princeton.edu;https://www.amazon.com", "aff_unique_abbr": "Peking U;Princeton;Amazon", "aff_campus_unique_index": "1", "aff_campus_unique": ";Beijing", "aff_country_unique_index": "0;0;1;1;0", "aff_country_unique": "China;United States" }, { "title": "Necessary and sufficient conditions for causal feature selection in time series with latent common causes", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8441", "id": "8441", "proceeding": "http://proceedings.mlr.press/v139/mastakouri21a.html", "slides": "", "author_site": "Atalanti Mastakouri, Bernhard Sch\u00f6lkopf, Dominik Janzing", "author": "Atalanti A Mastakouri; Bernhard Sch\u00f6lkopf; Dominik Janzing", "abstract": "We study the identification of direct and indirect causes on time series with latent variables, and provide a constrained-based causal feature selection method, which we prove that is both sound and complete under some graph constraints. Our theory and estimation algorithm require only two conditional independence tests for each observed candidate time series to determine whether or not it is a cause of an observed target time series. Furthermore, our selection of the conditioning set is such that it improves signal to noise ratio. 
We apply our method on real data, and on a wide range of simulated experiments, which yield very low false positive and relatively low false negative rates.", "bibtex": "@InProceedings{pmlr-v139-mastakouri21a,\n title = \t {Necessary and sufficient conditions for causal feature selection in time series with latent common causes},\n author = {Mastakouri, Atalanti A and Sch{\\\"o}lkopf, Bernhard and Janzing, Dominik},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7502--7511},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/mastakouri21a/mastakouri21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/mastakouri21a.html},\n abstract = \t {We study the identification of direct and indirect causes on time series with latent variables, and provide a constrained-based causal feature selection method, which we prove that is both sound and complete under some graph constraints. Our theory and estimation algorithm require only two conditional independence tests for each observed candidate time series to determine whether or not it is a cause of an observed target time series. Furthermore, our selection of the conditioning set is such that it improves signal to noise ratio. We apply our method on real data, and on a wide range of simulated experiments, which yield very low false positive and relatively low false negative rates.}\n}", "pdf": "http://proceedings.mlr.press/v139/mastakouri21a/mastakouri21a.pdf", "supp": "", "pdf_size": 934981, "gs_citation": 55, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=828706052106770162&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Amazon Research Tuebingen, AWS Causality Group, Germany+Max Planck Institute for Intelligent Systems, Empirical Inference Department, Germany; Amazon Research Tuebingen, AWS Causality Group, Germany+Max Planck Institute for Intelligent Systems, Empirical Inference Department, Germany; Amazon Research Tuebingen, AWS Causality Group, Germany", "aff_domain": "amazon.de; ; ", "email": "amazon.de; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/mastakouri21a.html", "aff_unique_index": "0+1;0+1;0", "aff_unique_norm": "Amazon;Max Planck Institute for Intelligent Systems", "aff_unique_dep": "AWS Causality Group;Empirical Inference Department", "aff_unique_url": "https://www.amazon.science;https://www.mpituebingen.mpg.de", "aff_unique_abbr": "Amazon;MPI-IS", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Tuebingen;", "aff_country_unique_index": "0+0;0+0;0", "aff_country_unique": "Germany" }, { "title": "Neighborhood Contrastive Learning Applied to Online Patient Monitoring", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9349", "id": "9349", "proceeding": "http://proceedings.mlr.press/v139/yeche21a.html", "slides": "", "author_site": "Hugo Y\u00e8che, Gideon Dresdner, Francesco Locatello, Matthias H\u00fcser, Gunnar R\u00e4tsch", "author": "Hugo Y\u00e8che; Gideon Dresdner; Francesco Locatello; Matthias H\u00fcser; Gunnar R\u00e4tsch", "abstract": "Intensive care units (ICU) are increasingly looking towards machine learning for methods to provide online monitoring of critically ill patients. 
In machine learning, online monitoring is often formulated as a supervised learning problem. Recently, contrastive learning approaches have demonstrated promising improvements over competitive supervised benchmarks. These methods rely on well-understood data augmentation techniques developed for image data which do not apply to online monitoring. In this work, we overcome this limitation by supplementing time-series data augmentation techniques with a novel contrastive learning objective which we call neighborhood contrastive learning (NCL). Our objective explicitly groups together contiguous time segments from each patient while maintaining state-specific information. Our experiments demonstrate a marked improvement over existing work applying contrastive methods to medical time-series.", "bibtex": "@InProceedings{pmlr-v139-yeche21a,\n title = \t {Neighborhood Contrastive Learning Applied to Online Patient Monitoring},\n author = {Y{\\`e}che, Hugo and Dresdner, Gideon and Locatello, Francesco and H{\\\"u}ser, Matthias and R{\\\"a}tsch, Gunnar},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11964--11974},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yeche21a/yeche21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yeche21a.html},\n abstract = \t {Intensive care units (ICU) are increasingly looking towards machine learning for methods to provide online monitoring of critically ill patients. In machine learning, online monitoring is often formulated as a supervised learning problem. Recently, contrastive learning approaches have demonstrated promising improvements over competitive supervised benchmarks. These methods rely on well-understood data augmentation techniques developed for image data which do not apply to online monitoring. In this work, we overcome this limitation by supplementing time-series data augmentation techniques with a novel contrastive learning objective which we call neighborhood contrastive learning (NCL). Our objective explicitly groups together contiguous time segments from each patient while maintaining state-specific information. 
Our experiments demonstrate a marked improvement over existing work applying contrastive methods to medical time-series.}\n}", "pdf": "http://proceedings.mlr.press/v139/yeche21a/yeche21a.pdf", "supp": "", "pdf_size": 2026570, "gs_citation": 50, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4664115316667000917&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science, ETH Z \u00a8urich, Switzerland; Department of Computer Science, ETH Z \u00a8urich, Switzerland; Amazon (most work was done when Francesco was at ETH Zurich and MPI-IS); Department of Computer Science, ETH Z \u00a8urich, Switzerland; Department of Computer Science, ETH Z \u00a8urich, Switzerland", "aff_domain": "ethz.ch;ethz.ch; ; ; ", "email": "ethz.ch;ethz.ch; ; ; ", "github": "https://github.com/ratschlab/ncl", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/yeche21a.html", "aff_unique_index": "0;0;1;0;0", "aff_unique_norm": "ETH Zurich;Amazon", "aff_unique_dep": "Department of Computer Science;Amazon", "aff_unique_url": "https://www.ethz.ch;https://www.amazon.com", "aff_unique_abbr": "ETHZ;Amazon", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;1;0;0", "aff_country_unique": "Switzerland;United States" }, { "title": "Network Inference and Influence Maximization from Samples", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9453", "id": "9453", "proceeding": "http://proceedings.mlr.press/v139/chen21q.html", "slides": "", "author_site": "Wei Chen, Xiaoming Sun, Jialin Zhang, Zhijie Zhang", "author": "Wei Chen; Xiaoming Sun; Jialin Zhang; Zhijie Zhang", "abstract": "Influence maximization is the task of selecting a small number of seed nodes in a social network to maximize the spread of the influence from these seeds, and it has been widely investigated in the past two decades. In the canonical setting, the whole social network as well as its diffusion parameters is given as input. In this paper, we consider the more realistic sampling setting where the network is unknown and we only have a set of passively observed cascades that record the set of activated nodes at each diffusion step. We study the task of influence maximization from these cascade samples (IMS), and present constant approximation algorithms for this task under mild conditions on the seed set distribution. To achieve the optimization goal, we also provide a novel solution to the network inference problem, that is, learning diffusion parameters and the network structure from the cascade data. Comparing with prior solutions, our network inference algorithm requires weaker assumptions and does not rely on maximum-likelihood estimation and convex programming. 
Our IMS algorithms enhance the learning-and-then-optimization approach by allowing a constant approximation ratio even when the diffusion parameters are hard to learn, and we do not need any assumption related to the network structure or diffusion parameters.", "bibtex": "@InProceedings{pmlr-v139-chen21q,\n title = \t {Network Inference and Influence Maximization from Samples},\n author = {Chen, Wei and Sun, Xiaoming and Zhang, Jialin and Zhang, Zhijie},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1707--1716},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21q/chen21q.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21q.html},\n abstract = \t {Influence maximization is the task of selecting a small number of seed nodes in a social network to maximize the spread of the influence from these seeds, and it has been widely investigated in the past two decades. In the canonical setting, the whole social network as well as its diffusion parameters is given as input. In this paper, we consider the more realistic sampling setting where the network is unknown and we only have a set of passively observed cascades that record the set of activated nodes at each diffusion step. We study the task of influence maximization from these cascade samples (IMS), and present constant approximation algorithms for this task under mild conditions on the seed set distribution. To achieve the optimization goal, we also provide a novel solution to the network inference problem, that is, learning diffusion parameters and the network structure from the cascade data. Comparing with prior solutions, our network inference algorithm requires weaker assumptions and does not rely on maximum-likelihood estimation and convex programming. 
Our IMS algorithms enhance the learning-and-then-optimization approach by allowing a constant approximation ratio even when the diffusion parameters are hard to learn, and we do not need any assumption related to the network structure or diffusion parameters.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21q/chen21q.pdf", "supp": "", "pdf_size": 296267, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1401253518719807353&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Microsoft Research Asia, Beijing, China; Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China + School of Computer Science and Technology, University of Chinese Academy of Sciences, Beijing, China; Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China + School of Computer Science and Technology, University of Chinese Academy of Sciences, Beijing, China; Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China + School of Computer Science and Technology, University of Chinese Academy of Sciences, Beijing, China", "aff_domain": "microsoft.com;ict.ac.cn;ict.ac.cn;ict.ac.cn", "email": "microsoft.com;ict.ac.cn;ict.ac.cn;ict.ac.cn", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/chen21q.html", "aff_unique_index": "0;1+2;1+2;1+2", "aff_unique_norm": "Microsoft;Chinese Academy of Sciences;University of Chinese Academy of Sciences", "aff_unique_dep": "Research;Institute of Computing Technology;School of Computer Science and Technology", "aff_unique_url": "https://www.microsoft.com/en-us/research/group/asia;http://www.ict.ac.cn;http://www.ucas.ac.cn", "aff_unique_abbr": "MSRA;CAS;UCAS", "aff_campus_unique_index": "0;0+0;0+0;0+0", "aff_campus_unique": "Beijing", "aff_country_unique_index": "0;0+0;0+0;0+0", "aff_country_unique": "China" }, { "title": "Neural Architecture Search without Training", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9263", "id": "9263", "proceeding": "http://proceedings.mlr.press/v139/mellor21a.html", "slides": "", "author_site": "Joe Mellor, Jack Turner, Amos Storkey, Elliot Crowley", "author": "Joe Mellor; Jack Turner; Amos Storkey; Elliot J Crowley", "abstract": "The time and effort involved in hand-designing deep neural networks is immense. This has prompted the development of Neural Architecture Search (NAS) techniques to automate this design. However, NAS algorithms tend to be slow and expensive; they need to train vast numbers of candidate networks to inform the search process. This could be alleviated if we could partially predict a network\u2019s trained accuracy from its initial state. In this work, we examine the overlap of activations between datapoints in untrained networks and motivate how this can give a measure which is usefully indicative of a network\u2019s trained performance. We incorporate this measure into a simple algorithm that allows us to search for powerful networks without any training in a matter of seconds on a single GPU, and verify its effectiveness on NAS-Bench-101, NAS-Bench-201, NATS-Bench, and Network Design Spaces. Our approach can be readily combined with more expensive search methods; we examine a simple adaptation of regularised evolutionary search. 
Code for reproducing our experiments is available at https://github.com/BayesWatch/nas-without-training.", "bibtex": "@InProceedings{pmlr-v139-mellor21a,\n title = \t {Neural Architecture Search without Training},\n author = {Mellor, Joe and Turner, Jack and Storkey, Amos and Crowley, Elliot J},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7588--7598},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/mellor21a/mellor21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/mellor21a.html},\n abstract = \t {The time and effort involved in hand-designing deep neural networks is immense. This has prompted the development of Neural Architecture Search (NAS) techniques to automate this design. However, NAS algorithms tend to be slow and expensive; they need to train vast numbers of candidate networks to inform the search process. This could be alleviated if we could partially predict a network\u2019s trained accuracy from its initial state. In this work, we examine the overlap of activations between datapoints in untrained networks and motivate how this can give a measure which is usefully indicative of a network\u2019s trained performance. We incorporate this measure into a simple algorithm that allows us to search for powerful networks without any training in a matter of seconds on a single GPU, and verify its effectiveness on NAS-Bench-101, NAS-Bench-201, NATS-Bench, and Network Design Spaces. Our approach can be readily combined with more expensive search methods; we examine a simple adaptation of regularised evolutionary search. Code for reproducing our experiments is available at https://github.com/BayesWatch/nas-without-training.}\n}", "pdf": "http://proceedings.mlr.press/v139/mellor21a/mellor21a.pdf", "supp": "", "pdf_size": 1602422, "gs_citation": 538, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12821590639566718193&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Usher Institute, University of Edinburgh; School of Informatics, University of Edinburgh; School of Informatics, University of Edinburgh; School of Engineering, University of Edinburgh", "aff_domain": "ed.ac.uk; ; ; ", "email": "ed.ac.uk; ; ; ", "github": "https://github.com/BayesWatch/nas-without-training", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/mellor21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Edinburgh", "aff_unique_dep": "Usher Institute", "aff_unique_url": "https://www.ed.ac.uk", "aff_unique_abbr": "Edinburgh", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Edinburgh;", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Neural Feature Matching in Implicit 3D Representations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9921", "id": "9921", "proceeding": "http://proceedings.mlr.press/v139/chen21f.html", "slides": "/media/icml-2021/Slides/9921.pdf", "author_site": "Yunlu Chen, Basura Fernando, Hakan Bilen, Thomas Mensink, Efstratios Gavves", "author": "Yunlu Chen; Basura Fernando; Hakan Bilen; Thomas Mensink; Efstratios Gavves", "abstract": "Recently, neural implicit functions have achieved impressive results for encoding 3D shapes. 
Conditioning on low-dimensional latent codes generalises a single implicit function to learn shared representation space for a variety of shapes, with the advantage of smooth interpolation. While the benefits from the global latent space do not correspond to explicit points at local level, we propose to track the continuous point trajectory by matching implicit features with the latent code interpolating between shapes, from which we corroborate the hierarchical functionality of the deep implicit functions, where early layers map the latent code to fitting the coarse shape structure, and deeper layers further refine the shape details. Furthermore, the structured representation space of implicit functions enables to apply feature matching for shape deformation, with the benefits to handle topology and semantics inconsistency, such as from an armchair to a chair with no arms, without explicit flow functions or manual annotations.", "bibtex": "@InProceedings{pmlr-v139-chen21f,\n title = \t {Neural Feature Matching in Implicit 3D Representations},\n author = {Chen, Yunlu and Fernando, Basura and Bilen, Hakan and Mensink, Thomas and Gavves, Efstratios},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1582--1593},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21f/chen21f.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21f.html},\n abstract = \t {Recently, neural implicit functions have achieved impressive results for encoding 3D shapes. Conditioning on low-dimensional latent codes generalises a single implicit function to learn shared representation space for a variety of shapes, with the advantage of smooth interpolation. While the benefits from the global latent space do not correspond to explicit points at local level, we propose to track the continuous point trajectory by matching implicit features with the latent code interpolating between shapes, from which we corroborate the hierarchical functionality of the deep implicit functions, where early layers map the latent code to fitting the coarse shape structure, and deeper layers further refine the shape details. 
Furthermore, the structured representation space of implicit functions enables to apply feature matching for shape deformation, with the benefits to handle topology and semantics inconsistency, such as from an armchair to a chair with no arms, without explicit flow functions or manual annotations.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21f/chen21f.pdf", "supp": "", "pdf_size": 9039974, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=979257905687382455&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Informatics Institute, University of Amsterdam, the Netherlands; AI3, IHPC, A*STAR, Singapore; School of Informatics, University of Edinburgh, Scotland; Google Research, Amsterdam, the Netherlands; Informatics Institute, University of Amsterdam, the Netherlands", "aff_domain": "uva.nl; ; ; ; ", "email": "uva.nl; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/chen21f.html", "aff_unique_index": "0;1;2;3;0", "aff_unique_norm": "University of Amsterdam;A*STAR;University of Edinburgh;Google", "aff_unique_dep": "Informatics Institute;AI3, IHPC;School of Informatics;Research", "aff_unique_url": "https://www.uva.nl;https://www.a-star.edu.sg;https://www.ed.ac.uk;https://research.google", "aff_unique_abbr": "UvA;A*STAR;Edinburgh;Google Research", "aff_campus_unique_index": "1;2", "aff_campus_unique": ";Edinburgh;Amsterdam", "aff_country_unique_index": "0;1;2;0;0", "aff_country_unique": "Netherlands;Singapore;United Kingdom" }, { "title": "Neural Pharmacodynamic State Space Modeling", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9577", "id": "9577", "proceeding": "http://proceedings.mlr.press/v139/hussain21a.html", "slides": "", "author_site": "Zeshan Hussain, Rahul G. Krishnan, David Sontag", "author": "Zeshan M Hussain; Rahul G. Krishnan; David Sontag", "abstract": "Modeling the time-series of high-dimensional, longitudinal data is important for predicting patient disease progression. However, existing neural network based approaches that learn representations of patient state, while very flexible, are susceptible to overfitting. We propose a deep generative model that makes use of a novel attention-based neural architecture inspired by the physics of how treatments affect disease state. The result is a scalable and accurate model of high-dimensional patient biomarkers as they vary over time. Our proposed model yields significant improvements in generalization and, on real-world clinical data, provides interpretable insights into the dynamics of cancer progression.", "bibtex": "@InProceedings{pmlr-v139-hussain21a,\n title = \t {Neural Pharmacodynamic State Space Modeling},\n author = {Hussain, Zeshan M and Krishnan, Rahul G. and Sontag, David},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4500--4510},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hussain21a/hussain21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hussain21a.html},\n abstract = \t {Modeling the time-series of high-dimensional, longitudinal data is important for predicting patient disease progression. 
However, existing neural network based approaches that learn representations of patient state, while very flexible, are susceptible to overfitting. We propose a deep generative model that makes use of a novel attention-based neural architecture inspired by the physics of how treatments affect disease state. The result is a scalable and accurate model of high-dimensional patient biomarkers as they vary over time. Our proposed model yields significant improvements in generalization and, on real-world clinical data, provides interpretable insights into the dynamics of cancer progression.}\n}", "pdf": "http://proceedings.mlr.press/v139/hussain21a/hussain21a.pdf", "supp": "", "pdf_size": 1284389, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14510307593159697487&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Massachussetts Institute of Technology, CSAIL and IMES, Cambridge, MA; Microsoft Research New England, Cambridge, MA; Massachussetts Institute of Technology, CSAIL and IMES, Cambridge, MA", "aff_domain": "mit.edu;mit.edu; ", "email": "mit.edu;mit.edu; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/hussain21a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Massachusetts Institute of Technology;Microsoft", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory (CSAIL), Institute for Medical Engineering and Science (IMES);New England", "aff_unique_url": "https://www.mit.edu;https://www.microsoft.com/en-us/research/group/new-england", "aff_unique_abbr": "MIT;MSR NE", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Neural Rough Differential Equations for Long Time Series", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10039", "id": "10039", "proceeding": "http://proceedings.mlr.press/v139/morrill21b.html", "slides": "", "author_site": "James Morrill, Cristopher Salvi, Patrick Kidger, James Foster", "author": "James Morrill; Cristopher Salvi; Patrick Kidger; James Foster", "abstract": "Neural controlled differential equations (CDEs) are the continuous-time analogue of recurrent neural networks, as Neural ODEs are to residual networks, and offer a memory-efficient continuous-time way to model functions of potentially irregular time series. Existing methods for computing the forward pass of a Neural CDE involve embedding the incoming time series into path space, often via interpolation, and using evaluations of this path to drive the hidden state. Here, we use rough path theory to extend this formulation. Instead of directly embedding into path space, we instead represent the input signal over small time intervals through its \\textit{log-signature}, which are statistics describing how the signal drives a CDE. This is the approach for solving \\textit{rough differential equations} (RDEs), and correspondingly we describe our main contribution as the introduction of Neural RDEs. This extension has a purpose: by generalising the Neural CDE approach to a broader class of driving signals, we demonstrate particular advantages for tackling long time series. 
In this regime, we demonstrate efficacy on problems of length up to 17k observations and observe significant training speed-ups, improvements in model performance, and reduced memory requirements compared to existing approaches.", "bibtex": "@InProceedings{pmlr-v139-morrill21b,\n title = \t {Neural Rough Differential Equations for Long Time Series},\n author = {Morrill, James and Salvi, Cristopher and Kidger, Patrick and Foster, James},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7829--7838},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/morrill21b/morrill21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/morrill21b.html},\n abstract = \t {Neural controlled differential equations (CDEs) are the continuous-time analogue of recurrent neural networks, as Neural ODEs are to residual networks, and offer a memory-efficient continuous-time way to model functions of potentially irregular time series. Existing methods for computing the forward pass of a Neural CDE involve embedding the incoming time series into path space, often via interpolation, and using evaluations of this path to drive the hidden state. Here, we use rough path theory to extend this formulation. Instead of directly embedding into path space, we instead represent the input signal over small time intervals through its \\textit{log-signature}, which are statistics describing how the signal drives a CDE. This is the approach for solving \\textit{rough differential equations} (RDEs), and correspondingly we describe our main contribution as the introduction of Neural RDEs. This extension has a purpose: by generalising the Neural CDE approach to a broader class of driving signals, we demonstrate particular advantages for tackling long time series. 
In this regime, we demonstrate efficacy on problems of length up to 17k observations and observe significant training speed-ups, improvements in model performance, and reduced memory requirements compared to existing approaches.}\n}", "pdf": "http://proceedings.mlr.press/v139/morrill21b/morrill21b.pdf", "supp": "", "pdf_size": 546004, "gs_citation": 178, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14779638466722098800&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Mathematical Institute, University of Oxford, UK + The Alan Turing Institute, British Library, UK; Mathematical Institute, University of Oxford, UK + The Alan Turing Institute, British Library, UK; Mathematical Institute, University of Oxford, UK + The Alan Turing Institute, British Library, UK; Mathematical Institute, University of Oxford, UK + The Alan Turing Institute, British Library, UK", "aff_domain": "maths.ox.ac.uk; ; ; ", "email": "maths.ox.ac.uk; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/morrill21b.html", "aff_unique_index": "0+1;0+1;0+1;0+1", "aff_unique_norm": "University of Oxford;Alan Turing Institute", "aff_unique_dep": "Mathematical Institute;", "aff_unique_url": "https://www.ox.ac.uk;https://www.turing.ac.uk", "aff_unique_abbr": "Oxford;ATI", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Oxford;", "aff_country_unique_index": "0+0;0+0;0+0;0+0", "aff_country_unique": "United Kingdom" }, { "title": "Neural SDEs as Infinite-Dimensional GANs", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10277", "id": "10277", "proceeding": "http://proceedings.mlr.press/v139/kidger21b.html", "slides": "", "author_site": "Patrick Kidger, James Foster, Xuechen Li, Terry Lyons", "author": "Patrick Kidger; James Foster; Xuechen Li; Terry J Lyons", "abstract": "Stochastic differential equations (SDEs) are a staple of mathematical modelling of temporal dynamics. However, a fundamental limitation has been that such models have typically been relatively inflexible, which recent work introducing Neural SDEs has sought to solve. Here, we show that the current classical approach to fitting SDEs may be approached as a special case of (Wasserstein) GANs, and in doing so the neural and classical regimes may be brought together. The input noise is Brownian motion, the output samples are time-evolving paths produced by a numerical solver, and by parameterising a discriminator as a Neural Controlled Differential Equation (CDE), we obtain Neural SDEs as (in modern machine learning parlance) continuous-time generative time series models. Unlike previous work on this problem, this is a direct extension of the classical approach without reference to either prespecified statistics or density functions. 
Arbitrary drift and diffusions are admissible, so as the Wasserstein loss has a unique global minima, in the infinite data limit \\textit{any} SDE may be learnt.", "bibtex": "@InProceedings{pmlr-v139-kidger21b,\n title = \t {Neural SDEs as Infinite-Dimensional GANs},\n author = {Kidger, Patrick and Foster, James and Li, Xuechen and Lyons, Terry J},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5453--5463},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kidger21b/kidger21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/kidger21b.html},\n abstract = \t {Stochastic differential equations (SDEs) are a staple of mathematical modelling of temporal dynamics. However, a fundamental limitation has been that such models have typically been relatively inflexible, which recent work introducing Neural SDEs has sought to solve. Here, we show that the current classical approach to fitting SDEs may be approached as a special case of (Wasserstein) GANs, and in doing so the neural and classical regimes may be brought together. The input noise is Brownian motion, the output samples are time-evolving paths produced by a numerical solver, and by parameterising a discriminator as a Neural Controlled Differential Equation (CDE), we obtain Neural SDEs as (in modern machine learning parlance) continuous-time generative time series models. Unlike previous work on this problem, this is a direct extension of the classical approach without reference to either prespecified statistics or density functions. Arbitrary drift and diffusions are admissible, so as the Wasserstein loss has a unique global minima, in the infinite data limit \\textit{any} SDE may be learnt.}\n}", "pdf": "http://proceedings.mlr.press/v139/kidger21b/kidger21b.pdf", "supp": "", "pdf_size": 542587, "gs_citation": 201, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5987016743553578663&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/kidger21b.html" }, { "title": "Neural Symbolic Regression that scales", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9713", "id": "9713", "proceeding": "http://proceedings.mlr.press/v139/biggio21a.html", "slides": "", "author_site": "Luca Biggio, Tommaso Bendinelli, Alexander Neitz, Aurelien Lucchi, Giambattista Parascandolo", "author": "Luca Biggio; Tommaso Bendinelli; Alexander Neitz; Aurelien Lucchi; Giambattista Parascandolo", "abstract": "Symbolic equations are at the core of scientific discovery. The task of discovering the underlying equation from a set of input-output pairs is called symbolic regression. Traditionally, symbolic regression methods use hand-designed strategies that do not improve with experience. In this paper, we introduce the first symbolic regression method that leverages large scale pre-training. We procedurally generate an unbounded set of equations, and simultaneously pre-train a Transformer to predict the symbolic equation from a corresponding set of input-output-pairs. At test time, we query the model on a new set of points and use its output to guide the search for the equation. 
We show empirically that this approach can re-discover a set of well-known physical equations, and that it improves over time with more data and compute.", "bibtex": "@InProceedings{pmlr-v139-biggio21a,\n title = \t {Neural Symbolic Regression that scales},\n author = {Biggio, Luca and Bendinelli, Tommaso and Neitz, Alexander and Lucchi, Aurelien and Parascandolo, Giambattista},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {936--945},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/biggio21a/biggio21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/biggio21a.html},\n abstract = \t {Symbolic equations are at the core of scientific discovery. The task of discovering the underlying equation from a set of input-output pairs is called symbolic regression. Traditionally, symbolic regression methods use hand-designed strategies that do not improve with experience. In this paper, we introduce the first symbolic regression method that leverages large scale pre-training. We procedurally generate an unbounded set of equations, and simultaneously pre-train a Transformer to predict the symbolic equation from a corresponding set of input-output-pairs. At test time, we query the model on a new set of points and use its output to guide the search for the equation. We show empirically that this approach can re-discover a set of well-known physical equations, and that it improves over time with more data and compute.}\n}", "pdf": "http://proceedings.mlr.press/v139/biggio21a/biggio21a.pdf", "supp": "", "pdf_size": 4829572, "gs_citation": 232, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13426541991949181353&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Department of Computer Science, ETH, Z\u00fcrich, Switzerland+Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany; CSEM SA, Alpnach, Switzerland; Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany; Department of Computer Science, ETH, Z\u00fcrich, Switzerland+Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany; Department of Computer Science, ETH, Z\u00fcrich, Switzerland+Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany", "aff_domain": "inf.ethz.ch;csem.ch; ; ; ", "email": "inf.ethz.ch;csem.ch; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/biggio21a.html", "aff_unique_index": "0+1;2;1;0+1;0+1", "aff_unique_norm": "ETH Zurich;Max Planck Institute for Intelligent Systems;CSEM SA", "aff_unique_dep": "Department of Computer Science;;", "aff_unique_url": "https://www.ethz.ch;https://www.mpi-is.mpg.de;https://www.csem.ch", "aff_unique_abbr": "ETH;MPI-IS;", "aff_campus_unique_index": "0+1;1;0+1;0+1", "aff_campus_unique": "Z\u00fcrich;T\u00fcbingen;", "aff_country_unique_index": "0+1;0;1;0+1;0+1", "aff_country_unique": "Switzerland;Germany" }, { "title": "Neural Tangent Generalization Attacks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10215", "id": "10215", "proceeding": "http://proceedings.mlr.press/v139/yuan21b.html", "slides": "/media/icml-2021/Slides/10215.pdf", "author_site": "Chia-Hung Yuan, Shan-Hung (Brandon) Wu", "author": "Chia-Hung Yuan; Shan-Hung Wu", "abstract": "The remarkable performance achieved by Deep 
Neural Networks (DNNs) in many applications is followed by the rising concern about data privacy and security. Since DNNs usually require large datasets to train, many practitioners scrape data from external sources such as the Internet. However, an external data owner may not be willing to let this happen, causing legal or ethical issues. In this paper, we study the generalization attacks against DNNs, where an attacker aims to slightly modify training data in order to spoil the training process such that a trained network lacks generalizability. These attacks can be performed by data owners and protect data from unexpected use. However, there is currently no efficient generalization attack against DNNs due to the complexity of a bilevel optimization involved. We propose the Neural Tangent Generalization Attack (NTGA) that, to the best of our knowledge, is the first work enabling clean-label, black-box generalization attack against DNNs. We conduct extensive experiments, and the empirical results demonstrate the effectiveness of NTGA. Our code and perturbed datasets are available at: https://github.com/lionelmessi6410/ntga.", "bibtex": "@InProceedings{pmlr-v139-yuan21b,\n title = \t {Neural Tangent Generalization Attacks},\n author = {Yuan, Chia-Hung and Wu, Shan-Hung},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12230--12240},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yuan21b/yuan21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/yuan21b.html},\n abstract = \t {The remarkable performance achieved by Deep Neural Networks (DNNs) in many applications is followed by the rising concern about data privacy and security. Since DNNs usually require large datasets to train, many practitioners scrape data from external sources such as the Internet. However, an external data owner may not be willing to let this happen, causing legal or ethical issues. In this paper, we study the generalization attacks against DNNs, where an attacker aims to slightly modify training data in order to spoil the training process such that a trained network lacks generalizability. These attacks can be performed by data owners and protect data from unexpected use. However, there is currently no efficient generalization attack against DNNs due to the complexity of a bilevel optimization involved. We propose the Neural Tangent Generalization Attack (NTGA) that, to the best of our knowledge, is the first work enabling clean-label, black-box generalization attack against DNNs. We conduct extensive experiments, and the empirical results demonstrate the effectiveness of NTGA. 
Our code and perturbed datasets are available at: https://github.com/lionelmessi6410/ntga.}\n}", "pdf": "http://proceedings.mlr.press/v139/yuan21b/yuan21b.pdf", "supp": "", "pdf_size": 4695245, "gs_citation": 73, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6660533520605377256&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/yuan21b.html" }, { "title": "Neural Transformation Learning for Deep Anomaly Detection Beyond Images", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10329", "id": "10329", "proceeding": "http://proceedings.mlr.press/v139/qiu21a.html", "slides": "/media/icml-2021/Slides/10329.pdf", "author_site": "Chen Qiu, Timo Pfrommer, Marius Kloft, Stephan Mandt, Maja Rudolph", "author": "Chen Qiu; Timo Pfrommer; Marius Kloft; Stephan Mandt; Maja Rudolph", "abstract": "Data transformations (e.g. rotations, reflections, and cropping) play an important role in self-supervised learning. Typically, images are transformed into different views, and neural networks trained on tasks involving these views produce useful feature representations for downstream tasks, including anomaly detection. However, for anomaly detection beyond image data, it is often unclear which transformations to use. Here we present a simple end-to-end procedure for anomaly detection with learnable transformations. The key idea is to embed the transformed data into a semantic space such that the transformed data still resemble their untransformed form, while different transformations are easily distinguishable. Extensive experiments on time series show that our proposed method outperforms existing approaches in the one-vs.-rest setting and is competitive in the more challenging n-vs.-rest anomaly-detection task. On medical and cyber-security tabular data, our method learns domain-specific transformations and detects anomalies more accurately than previous work.", "bibtex": "@InProceedings{pmlr-v139-qiu21a,\n title = \t {Neural Transformation Learning for Deep Anomaly Detection Beyond Images},\n author = {Qiu, Chen and Pfrommer, Timo and Kloft, Marius and Mandt, Stephan and Rudolph, Maja},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8703--8714},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/qiu21a/qiu21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/qiu21a.html},\n abstract = \t {Data transformations (e.g. rotations, reflections, and cropping) play an important role in self-supervised learning. Typically, images are transformed into different views, and neural networks trained on tasks involving these views produce useful feature representations for downstream tasks, including anomaly detection. However, for anomaly detection beyond image data, it is often unclear which transformations to use. Here we present a simple end-to-end procedure for anomaly detection with learnable transformations. The key idea is to embed the transformed data into a semantic space such that the transformed data still resemble their untransformed form, while different transformations are easily distinguishable. 
Extensive experiments on time series show that our proposed method outperforms existing approaches in the one-vs.-rest setting and is competitive in the more challenging n-vs.-rest anomaly-detection task. On medical and cyber-security tabular data, our method learns domain-specific transformations and detects anomalies more accurately than previous work.}\n}", "pdf": "http://proceedings.mlr.press/v139/qiu21a/qiu21a.pdf", "supp": "", "pdf_size": 1470296, "gs_citation": 186, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1292087033558963213&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 12, "aff": "Bosch Center for AI; Bosch Center for AI; TU Kaiserslautern; UC Irvine; Bosch Center for AI", "aff_domain": "de.bosch.com;de.bosch.com;cs.uni-kl.de;uci.edu;de.bosch.com", "email": "de.bosch.com;de.bosch.com;cs.uni-kl.de;uci.edu;de.bosch.com", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/qiu21a.html", "aff_unique_index": "0;0;1;2;0", "aff_unique_norm": "Bosch Center for AI;Technische Universit\u00e4t Kaiserslautern;University of California, Irvine", "aff_unique_dep": "Center for AI;;", "aff_unique_url": "https://www.bosch-ai.com;https://www.tu-kl.de;https://www.uci.edu", "aff_unique_abbr": "BCAI;TU Kaiserslautern;UCI", "aff_campus_unique_index": "1", "aff_campus_unique": ";Irvine", "aff_country_unique_index": "0;0;0;1;0", "aff_country_unique": "Germany;United States" }, { "title": "Neural-Pull: Learning Signed Distance Function from Point clouds by Learning to Pull Space onto Surface", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9085", "id": "9085", "proceeding": "http://proceedings.mlr.press/v139/ma21b.html", "slides": "", "author_site": "Baorui Ma, Zhizhong Han, Yushen Liu, Matthias Zwicker", "author": "Baorui Ma; Zhizhong Han; Yu-Shen Liu; Matthias Zwicker", "abstract": "Reconstructing continuous surfaces from 3D point clouds is a fundamental operation in 3D geometry processing. Several recent state-of-the-art methods address this problem using neural networks to learn signed distance functions (SDFs). In this paper, we introduce Neural-Pull, a new approach that is simple and leads to high quality SDFs. Specifically, we train a neural network to pull query 3D locations to their closest points on the surface using the predicted signed distance values and the gradient at the query locations, both of which are computed by the network itself. The pulling operation moves each query location with a stride given by the distance predicted by the network. Based on the sign of the distance, this may move the query location along or against the direction of the gradient of the SDF. This is a differentiable operation that allows us to update the signed distance value and the gradient simultaneously during training. Our outperforming results under widely used benchmarks demonstrate that we can learn SDFs more accurately and flexibly for surface reconstruction and single image reconstruction than the state-of-the-art methods. 
Our code and data are available at https://github.com/mabaorui/NeuralPull.", "bibtex": "@InProceedings{pmlr-v139-ma21b,\n title = \t {Neural-Pull: Learning Signed Distance Function from Point clouds by Learning to Pull Space onto Surface},\n author = {Ma, Baorui and Han, Zhizhong and Liu, Yu-Shen and Zwicker, Matthias},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7246--7257},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ma21b/ma21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/ma21b.html},\n abstract = \t {Reconstructing continuous surfaces from 3D point clouds is a fundamental operation in 3D geometry processing. Several recent state-of-the-art methods address this problem using neural networks to learn signed distance functions (SDFs). In this paper, we introduce Neural-Pull, a new approach that is simple and leads to high quality SDFs. Specifically, we train a neural network to pull query 3D locations to their closest points on the surface using the predicted signed distance values and the gradient at the query locations, both of which are computed by the network itself. The pulling operation moves each query location with a stride given by the distance predicted by the network. Based on the sign of the distance, this may move the query location along or against the direction of the gradient of the SDF. This is a differentiable operation that allows us to update the signed distance value and the gradient simultaneously during training. Our outperforming results under widely used benchmarks demonstrate that we can learn SDFs more accurately and flexibly for surface reconstruction and single image reconstruction than the state-of-the-art methods. Our code and data are available at https://github.com/mabaorui/NeuralPull.}\n}", "pdf": "http://proceedings.mlr.press/v139/ma21b/ma21b.pdf", "supp": "", "pdf_size": 8073584, "gs_citation": 184, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3625497984175678623&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "School of Software, BNRist, Tsinghua University, Beijing 100084, P. R. China; Department of Computer Science, Wayne State University, Detroit, USA; School of Software, BNRist, Tsinghua University, Beijing 100084, P. R. China; Department of Computer Science, University of Maryland, College Park, USA", "aff_domain": "tsinghua.edu.cn;wayne.edu;tsinghua.edu.cn;cs.umd.edu", "email": "tsinghua.edu.cn;wayne.edu;tsinghua.edu.cn;cs.umd.edu", "github": "https://github.com/mabaorui/NeuralPull", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/ma21b.html", "aff_unique_index": "0;2;0;3", "aff_unique_norm": "Tsinghua University;;Wayne State University;University of Maryland, College Park", "aff_unique_dep": "School of Software;;Department of Computer Science;Department of Computer Science", "aff_unique_url": "https://www.tsinghua.edu.cn;;https://wayne.edu;https://www.umd.edu", "aff_unique_abbr": "THU;;WSU;UMD", "aff_campus_unique_index": "0;2;0;3", "aff_campus_unique": "Beijing;;Detroit;College Park", "aff_country_unique_index": "0;2;0;2", "aff_country_unique": "China;;United States" }, { "title": "Neuro-algorithmic Policies Enable Fast Combinatorial Generalization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9465", "id": "9465", "proceeding": "http://proceedings.mlr.press/v139/vlastelica21a.html", "slides": "/media/icml-2021/Slides/9465.pdf", "author_site": "Marin Vlastelica, Michal Rolinek, Georg Martius", "author": "Marin Vlastelica; Michal Rolinek; Georg Martius", "abstract": "Although model-based and model-free approaches to learning the control of systems have achieved impressive results on standard benchmarks, generalization to task variations is still lacking. Recent results suggest that generalization for standard architectures improves only after obtaining exhaustive amounts of data. We give evidence that generalization capabilities are in many cases bottlenecked by the inability to generalize on the combinatorial aspects of the problem. We show that, for a certain subclass of the MDP framework, this can be alleviated by a neuro-algorithmic policy architecture that embeds a time-dependent shortest path solver in a deep neural network.
Trained end-to-end via blackbox-differentiation, this method leads to considerable improvement in generalization capabilities in the low-data regime.}\n}", "pdf": "http://proceedings.mlr.press/v139/vlastelica21a/vlastelica21a.pdf", "supp": "", "pdf_size": 9977325, "gs_citation": 19, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=401495911843743171&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany; Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany; Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany", "aff_domain": "tue.mpg.de; ; ", "email": "tue.mpg.de; ; ", "github": "martius-lab.github.io/NAP", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/vlastelica21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Max Planck Institute for Intelligent Systems", "aff_unique_dep": "", "aff_unique_url": "https://www.mpi-is.mpg.de", "aff_unique_abbr": "MPI-IS", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "T\u00fcbingen", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Germany" }, { "title": "Newton Method over Networks is Fast up to the Statistical Precision", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10747", "id": "10747", "proceeding": "http://proceedings.mlr.press/v139/daneshmand21a.html", "slides": "", "author_site": "Amir Daneshmand, Gesualdo Scutari, Pavel Dvurechenskii, Alexander Gasnikov", "author": "Amir Daneshmand; Gesualdo Scutari; Pavel Dvurechensky; Alexander Gasnikov", "abstract": "We propose a distributed cubic regularization of the Newton method for solving (constrained) empirical risk minimization problems over a network of agents, modeled as undirected graph. The algorithm employs an inexact, preconditioned Newton step at each agent\u2019s side: the gradient of the centralized loss is iteratively estimated via a gradient-tracking consensus mechanism and the Hessian is subsampled over the local data sets. No Hessian matrices are exchanged over the network. We derive global complexity bounds for convex and strongly convex losses. Our analysis reveals an interesting interplay between sample and iteration/communication complexity: statistically accurate solutions are achievable in roughly the same number of iterations of the centralized cubic Newton, with a communication cost per iteration of the order of $\\widetilde{\\mathcal{O}}\\big(1/\\sqrt{1-\\rho}\\big)$, where $\\rho$ characterizes the connectivity of the network. 
This represents a significant improvement with respect to existing, statistically oblivious, distributed Newton-based methods over networks.", "bibtex": "@InProceedings{pmlr-v139-daneshmand21a,\n title = \t {Newton Method over Networks is Fast up to the Statistical Precision},\n author = {Daneshmand, Amir and Scutari, Gesualdo and Dvurechensky, Pavel and Gasnikov, Alexander},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2398--2409},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/daneshmand21a/daneshmand21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/daneshmand21a.html},\n abstract = \t {We propose a distributed cubic regularization of the Newton method for solving (constrained) empirical risk minimization problems over a network of agents, modeled as undirected graph. The algorithm employs an inexact, preconditioned Newton step at each agent\u2019s side: the gradient of the centralized loss is iteratively estimated via a gradient-tracking consensus mechanism and the Hessian is subsampled over the local data sets. No Hessian matrices are exchanged over the network. We derive global complexity bounds for convex and strongly convex losses. Our analysis reveals an interesting interplay between sample and iteration/communication complexity: statistically accurate solutions are achievable in roughly the same number of iterations of the centralized cubic Newton, with a communication cost per iteration of the order of $\\widetilde{\\mathcal{O}}\\big(1/\\sqrt{1-\\rho}\\big)$, where $\\rho$ characterizes the connectivity of the network. 
This represents a significant improvement with respect to existing, statistically oblivious, distributed Newton-based methods over networks.}\n}", "pdf": "http://proceedings.mlr.press/v139/daneshmand21a/daneshmand21a.pdf", "supp": "", "pdf_size": 498592, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5233023015907306245&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "School of Industrial Engineering, Purdue University, West-Lafayette, IN, USA; School of Industrial Engineering, Purdue University, West-Lafayette, IN, USA; Weierstrass Institute for Applied Analysis and Stochastics, Berlin, Germany + Higher School of Economics (HSE) University, Moscow, Russia; Higher School of Economics (HSE) University, Moscow, Russia + Moscow Institute of Physics and Technology, Dolgoprudny, Russia", "aff_domain": "purdue.edu; ; ; ", "email": "purdue.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/daneshmand21a.html", "aff_unique_index": "0;0;1+2;2+3", "aff_unique_norm": "Purdue University;Weierstrass Institute for Applied Analysis and Stochastics;Higher School of Economics (HSE) University;Moscow Institute of Physics and Technology", "aff_unique_dep": "School of Industrial Engineering;;;", "aff_unique_url": "https://www.purdue.edu;https://www.wias-berlin.de/;https://www.hse.ru;https://www.mipt.ru", "aff_unique_abbr": "Purdue;WIAS;HSE;MIPT", "aff_campus_unique_index": "0;0;1+2;2+3", "aff_campus_unique": "West-Lafayette;Berlin;Moscow;Dolgoprudny", "aff_country_unique_index": "0;0;1+2;2+2", "aff_country_unique": "United States;Germany;Russian Federation" }, { "title": "No-regret Algorithms for Capturing Events in Poisson Point Processes", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9633", "id": "9633", "proceeding": "http://proceedings.mlr.press/v139/mutny21a.html", "slides": "", "author_site": "Mojmir Mutny, Andreas Krause", "author": "Mojmir Mutny; Andreas Krause", "abstract": "Inhomogeneous Poisson point processes are widely used models of event occurrences. We address \\emph{adaptive sensing of Poisson Point processes}, namely, maximizing the number of captured events subject to sensing costs. We encode prior assumptions on the rate function by modeling it as a member of a known \\emph{reproducing kernel Hilbert space} (RKHS). By partitioning the domain into separate small regions, and using heteroscedastic linear regression, we propose a tractable estimator of Poisson process rates for two feedback models: \\emph{count-record}, where exact locations of events are observed, and \\emph{histogram} feedback, where only counts of events are observed. We derive provably accurate anytime confidence estimates for our estimators for sequentially acquired Poisson count data. Using these, we formulate algorithms based on optimism that provably incur sublinear count-regret. 
We demonstrate the practicality of the method on problems from crime modeling, revenue maximization as well as environmental monitoring.", "bibtex": "@InProceedings{pmlr-v139-mutny21a,\n title = \t {No-regret Algorithms for Capturing Events in Poisson Point Processes},\n author = {Mutny, Mojmir and Krause, Andreas},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7894--7904},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/mutny21a/mutny21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/mutny21a.html},\n abstract = \t {Inhomogeneous Poisson point processes are widely used models of event occurrences. We address \\emph{adaptive sensing of Poisson Point processes}, namely, maximizing the number of captured events subject to sensing costs. We encode prior assumptions on the rate function by modeling it as a member of a known \\emph{reproducing kernel Hilbert space} (RKHS). By partitioning the domain into separate small regions, and using heteroscedastic linear regression, we propose a tractable estimator of Poisson process rates for two feedback models: \\emph{count-record}, where exact locations of events are observed, and \\emph{histogram} feedback, where only counts of events are observed. We derive provably accurate anytime confidence estimates for our estimators for sequentially acquired Poisson count data. Using these, we formulate algorithms based on optimism that provably incur sublinear count-regret. We demonstrate the practicality of the method on problems from crime modeling, revenue maximization as well as environmental monitoring.}\n}", "pdf": "http://proceedings.mlr.press/v139/mutny21a/mutny21a.pdf", "supp": "", "pdf_size": 3385462, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8883867531261499869&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, ETH Zurich, Zurich, Switzerland; Department of Computer Science, ETH Zurich, Zurich, Switzerland", "aff_domain": "inf.ethz.ch; ", "email": "inf.ethz.ch; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/mutny21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETHZ", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Zurich", "aff_country_unique_index": "0;0", "aff_country_unique": "Switzerland" }, { "title": "Noise and Fluctuation of Finite Learning Rate Stochastic Gradient Descent", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10649", "id": "10649", "proceeding": "http://proceedings.mlr.press/v139/liu21ad.html", "slides": "", "author_site": "Kangqiao Liu, Liu Ziyin, Masahito Ueda", "author": "Kangqiao Liu; Liu Ziyin; Masahito Ueda", "abstract": "In the vanishing learning rate regime, stochastic gradient descent (SGD) is now relatively well understood. In this work, we propose to study the basic properties of SGD and its variants in the non-vanishing learning rate regime. The focus is on deriving exactly solvable results and discussing their implications. 
The main contributions of this work are to derive the stationary distribution for discrete-time SGD in a quadratic loss function with and without momentum; in particular, one implication of our result is that the fluctuation caused by discrete-time dynamics takes a distorted shape and is dramatically larger than a continuous-time theory could predict. Examples of applications of the proposed theory considered in this work include the approximation error of variants of SGD, the effect of minibatch noise, the optimal Bayesian inference, the escape rate from a sharp minimum, and the stationary covariance of a few second-order methods including damped Newton\u2019s method, natural gradient descent, and Adam.", "bibtex": "@InProceedings{pmlr-v139-liu21ad,\n title = \t {Noise and Fluctuation of Finite Learning Rate Stochastic Gradient Descent},\n author = {Liu, Kangqiao and Ziyin, Liu and Ueda, Masahito},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7045--7056},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21ad/liu21ad.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21ad.html},\n abstract = \t {In the vanishing learning rate regime, stochastic gradient descent (SGD) is now relatively well understood. In this work, we propose to study the basic properties of SGD and its variants in the non-vanishing learning rate regime. The focus is on deriving exactly solvable results and discussing their implications. The main contributions of this work are to derive the stationary distribution for discrete-time SGD in a quadratic loss function with and without momentum; in particular, one implication of our result is that the fluctuation caused by discrete-time dynamics takes a distorted shape and is dramatically larger than a continuous-time theory could predict. 
Examples of applications of the proposed theory considered in this work include the approximation error of variants of SGD, the effect of minibatch noise, the optimal Bayesian inference, the escape rate from a sharp minimum, and the stationary covariance of a few second-order methods including damped Newton\u2019s method, natural gradient descent, and Adam.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21ad/liu21ad.pdf", "supp": "", "pdf_size": 1095998, "gs_citation": 36, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9296888519054608825&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 5, "aff": "Department of Physics, the University of Tokyo, Japan+RIKEN CEMS, Japan+Institute for Physics of Intelligence, the University of Tokyo, Japan; Department of Physics, the University of Tokyo, Japan+RIKEN CEMS, Japan+Institute for Physics of Intelligence, the University of Tokyo, Japan; Department of Physics, the University of Tokyo, Japan+RIKEN CEMS, Japan+Institute for Physics of Intelligence, the University of Tokyo, Japan", "aff_domain": "cat.phys.s.u-tokyo.ac.jp;cat.phys.s.u-tokyo.ac.jp; ", "email": "cat.phys.s.u-tokyo.ac.jp;cat.phys.s.u-tokyo.ac.jp; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/liu21ad.html", "aff_unique_index": "0+1+0;0+1+0;0+1+0", "aff_unique_norm": "University of Tokyo;RIKEN", "aff_unique_dep": "Department of Physics;CEMS", "aff_unique_url": "https://www.u-tokyo.ac.jp;https://www.riken.jp", "aff_unique_abbr": "UTokyo;RIKEN", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Tokyo;", "aff_country_unique_index": "0+0+0;0+0+0;0+0+0", "aff_country_unique": "Japan" }, { "title": "Non-Autoregressive Electron Redistribution Modeling for Reaction Prediction", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9427", "id": "9427", "proceeding": "http://proceedings.mlr.press/v139/bi21a.html", "slides": "/media/icml-2021/Slides/9427_01NVWIK.pdf", "author_site": "Hangrui Bi, Hengyi Wang, Chence Shi, Connor Coley, Jian Tang, Hongyu Guo", "author": "Hangrui Bi; Hengyi Wang; Chence Shi; Connor Coley; Jian Tang; Hongyu Guo", "abstract": "Reliably predicting the products of chemical reactions presents a fundamental challenge in synthetic chemistry. Existing machine learning approaches typically produce a reaction product by sequentially forming its subparts or intermediate molecules. Such autoregressive methods, however, not only require a pre-defined order for the incremental construction but preclude the use of parallel decoding for efficient computation. To address these issues, we devise a non-autoregressive learning paradigm that predicts reaction in one shot. Leveraging the fact that chemical reactions can be described as a redistribution of electrons in molecules, we formulate a reaction as an arbitrary electron flow and predict it with a novel multi-pointer decoding network. Experiments on the USPTO-MIT dataset show that our approach has established a new state-of-the-art top-1 accuracy and achieves at least 27 times inference speedup over the state-of-the-art methods. 
Also, our predictions are easier for chemists to interpret owing to predicting the electron flows.", "bibtex": "@InProceedings{pmlr-v139-bi21a,\n title = \t {Non-Autoregressive Electron Redistribution Modeling for Reaction Prediction},\n author = {Bi, Hangrui and Wang, Hengyi and Shi, Chence and Coley, Connor and Tang, Jian and Guo, Hongyu},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {904--913},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bi21a/bi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bi21a.html},\n abstract = \t {Reliably predicting the products of chemical reactions presents a fundamental challenge in synthetic chemistry. Existing machine learning approaches typically produce a reaction product by sequentially forming its subparts or intermediate molecules. Such autoregressive methods, however, not only require a pre-defined order for the incremental construction but preclude the use of parallel decoding for efficient computation. To address these issues, we devise a non-autoregressive learning paradigm that predicts reaction in one shot. Leveraging the fact that chemical reactions can be described as a redistribution of electrons in molecules, we formulate a reaction as an arbitrary electron flow and predict it with a novel multi-pointer decoding network. Experiments on the USPTO-MIT dataset show that our approach has established a new state-of-the-art top-1 accuracy and achieves at least 27 times inference speedup over the state-of-the-art methods. Also, our predictions are easier for chemists to interpret owing to predicting the electron flows.}\n}", "pdf": "http://proceedings.mlr.press/v139/bi21a/bi21a.pdf", "supp": "", "pdf_size": 414786, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9737118567954473042&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Peking University; Peking University; Mila - Quebec AI Institute+University of Montr\u00e9al; MIT; Mila - Quebec AI Institute+CIFAR AI Research Chair+HEC Montr\u00e9al; National Research Council Canada", "aff_domain": "hec.ca;nrc-cnrc.gc.ca; ; ; ; ", "email": "hec.ca;nrc-cnrc.gc.ca; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/bi21a.html", "aff_unique_index": "0;0;1+2;3;1+4+5;6", "aff_unique_norm": "Peking University;Quebec AI Institute;University of Montreal;Massachusetts Institute of Technology;CIFAR;HEC Montr\u00e9al;National Research Council Canada", "aff_unique_dep": ";AI Institute;;;AI Research;;", "aff_unique_url": "http://www.pku.edu.cn;https://mila.quebec;https://www.mcgill.ca;https://web.mit.edu;https://www.cifar.ca;https://www.hec.ca;https://www.nrc-cnrc.gc.ca", "aff_unique_abbr": "Peking U;Mila;U Montreal;MIT;CIFAR;HEC;NRC-CNRC", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0;0;1+1;2;1+1+1;1", "aff_country_unique": "China;Canada;United States" }, { "title": "Non-Exponentially Weighted Aggregation: Regret Bounds for Unbounded Loss Functions", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8841", "id": "8841", "proceeding": "http://proceedings.mlr.press/v139/alquier21a.html", "slides": "/media/icml-2021/Slides/8841.pdf", "author": "Pierre Alquier", "abstract": "We tackle the
problem of online optimization with a general, possibly unbounded, loss function. It is well known that when the loss is bounded, the exponentially weighted aggregation strategy (EWA) leads to a regret in $\\sqrt{T}$ after $T$ steps. In this paper, we study a generalized aggregation strategy, where the weights no longer depend exponentially on the losses. Our strategy is based on Follow The Regularized Leader (FTRL): we minimize the expected losses plus a regularizer, that is here a $\\phi$-divergence. When the regularizer is the Kullback-Leibler divergence, we obtain EWA as a special case. Using alternative divergences enables unbounded losses, at the cost of a worst regret bound in some cases.", "bibtex": "@InProceedings{pmlr-v139-alquier21a,\n title = \t {Non-Exponentially Weighted Aggregation: Regret Bounds for Unbounded Loss Functions},\n author = {Alquier, Pierre},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {207--218},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/alquier21a/alquier21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/alquier21a.html},\n abstract = \t {We tackle the problem of online optimization with a general, possibly unbounded, loss function. It is well known that when the loss is bounded, the exponentially weighted aggregation strategy (EWA) leads to a regret in $\\sqrt{T}$ after $T$ steps. In this paper, we study a generalized aggregation strategy, where the weights no longer depend exponentially on the losses. Our strategy is based on Follow The Regularized Leader (FTRL): we minimize the expected losses plus a regularizer, that is here a $\\phi$-divergence. When the regularizer is the Kullback-Leibler divergence, we obtain EWA as a special case. Using alternative divergences enables unbounded losses, at the cost of a worst regret bound in some cases.}\n}", "pdf": "http://proceedings.mlr.press/v139/alquier21a/alquier21a.pdf", "supp": "", "pdf_size": 378089, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12850414584239938300&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "RIKEN AIP, Tokyo, Japan", "aff_domain": "riken.jp", "email": "riken.jp", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v139/alquier21a.html", "aff_unique_index": "0", "aff_unique_norm": "RIKEN AIP", "aff_unique_dep": "", "aff_unique_url": "https://aip.Riken.jp", "aff_unique_abbr": "RIKEN AIP", "aff_campus_unique_index": "0", "aff_campus_unique": "Tokyo", "aff_country_unique_index": "0", "aff_country_unique": "Japan" }, { "title": "Non-Negative Bregman Divergence Minimization for Deep Direct Density Ratio Estimation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8605", "id": "8605", "proceeding": "http://proceedings.mlr.press/v139/kato21a.html", "slides": "/media/icml-2021/Slides/8605.pdf", "author_site": "Masahiro Kato, Takeshi Teshima", "author": "Masahiro Kato; Takeshi Teshima", "abstract": "Density ratio estimation (DRE) is at the core of various machine learning tasks such as anomaly detection and domain adaptation. In the DRE literature, existing studies have extensively studied methods based on Bregman divergence (BD) minimization. 
However, when we apply the BD minimization with highly flexible models, such as deep neural networks, it tends to suffer from what we call train-loss hacking, which is a source of over-fitting caused by a typical characteristic of empirical BD estimators. In this paper, to mitigate train-loss hacking, we propose non-negative correction for empirical BD estimators. Theoretically, we confirm the soundness of the proposed method through a generalization error bound. In our experiments, the proposed methods show favorable performances in inlier-based outlier detection.", "bibtex": "@InProceedings{pmlr-v139-kato21a,\n title = \t {Non-Negative Bregman Divergence Minimization for Deep Direct Density Ratio Estimation},\n author = {Kato, Masahiro and Teshima, Takeshi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5320--5333},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kato21a/kato21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kato21a.html},\n abstract = \t {Density ratio estimation (DRE) is at the core of various machine learning tasks such as anomaly detection and domain adaptation. In the DRE literature, existing studies have extensively studied methods based on Bregman divergence (BD) minimization. However, when we apply the BD minimization with highly flexible models, such as deep neural networks, it tends to suffer from what we call train-loss hacking, which is a source of over-fitting caused by a typical characteristic of empirical BD estimators. In this paper, to mitigate train-loss hacking, we propose non-negative correction for empirical BD estimators. Theoretically, we confirm the soundness of the proposed method through a generalization error bound. In our experiments, the proposed methods show favorable performances in inlier-based outlier detection.}\n}", "pdf": "http://proceedings.mlr.press/v139/kato21a/kato21a.pdf", "supp": "", "pdf_size": 1001916, "gs_citation": 42, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10575793668423594372&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "CyberAgent Inc., Tokyo, Japan; The University of Tokyo, Tokyo, Japan", "aff_domain": "cyberagent.co.jp;ms.k.u-tokyo.ac.jp", "email": "cyberagent.co.jp;ms.k.u-tokyo.ac.jp", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/kato21a.html", "aff_unique_index": "0;1", "aff_unique_norm": "CyberAgent Inc.;University of Tokyo", "aff_unique_dep": ";", "aff_unique_url": "https://www.cyberagent.co.jp;https://www.u-tokyo.ac.jp", "aff_unique_abbr": ";UTokyo", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Tokyo", "aff_country_unique_index": "0;0", "aff_country_unique": "Japan" }, { "title": "Nondeterminism and Instability in Neural Network Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9001", "id": "9001", "proceeding": "http://proceedings.mlr.press/v139/summers21a.html", "slides": "", "author_site": "Cecilia Summers, Michael J Dinneen", "author": "Cecilia Summers; Michael J. Dinneen", "abstract": "Nondeterminism in neural network optimization produces uncertainty in performance, making small improvements difficult to discern from run-to-run variability. 
While uncertainty can be reduced by training multiple model copies, doing so is time-consuming, costly, and harms reproducibility. In this work, we establish an experimental protocol for understanding the effect of optimization nondeterminism on model diversity, allowing us to isolate the effects of a variety of sources of nondeterminism. Surprisingly, we find that all sources of nondeterminism have similar effects on measures of model diversity. To explain this intriguing fact, we identify the instability of model training, taken as an end-to-end procedure, as the key determinant. We show that even one-bit changes in initial parameters result in models converging to vastly different values. Last, we propose two approaches for reducing the effects of instability on run-to-run variability.", "bibtex": "@InProceedings{pmlr-v139-summers21a,\n title = \t {Nondeterminism and Instability in Neural Network Optimization},\n author = {Summers, Cecilia and Dinneen, Michael J.},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9913--9922},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/summers21a/summers21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/summers21a.html},\n abstract = \t {Nondeterminism in neural network optimization produces uncertainty in performance, making small improvements difficult to discern from run-to-run variability. While uncertainty can be reduced by training multiple model copies, doing so is time-consuming, costly, and harms reproducibility. In this work, we establish an experimental protocol for understanding the effect of optimization nondeterminism on model diversity, allowing us to isolate the effects of a variety of sources of nondeterminism. Surprisingly, we find that all sources of nondeterminism have similar effects on measures of model diversity. To explain this intriguing fact, we identify the instability of model training, taken as an end-to-end procedure, as the key determinant. We show that even one-bit changes in initial parameters result in models converging to vastly different values. 
Last, we propose two approaches for reducing the effects of instability on run-to-run variability.}\n}", "pdf": "http://proceedings.mlr.press/v139/summers21a/summers21a.pdf", "supp": "", "pdf_size": 990766, "gs_citation": 37, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3721428237004074314&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, University of Auckland, Auckland, New Zealand; Department of Computer Science, University of Auckland, Auckland, New Zealand", "aff_domain": "gmail.com; ", "email": "gmail.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/summers21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Auckland", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.auckland.ac.nz", "aff_unique_abbr": "UoA", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Auckland", "aff_country_unique_index": "0;0", "aff_country_unique": "New Zealand" }, { "title": "Nonmyopic Multifidelity Acitve Search", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10057", "id": "10057", "proceeding": "http://proceedings.mlr.press/v139/nguyen21f.html", "slides": "", "author_site": "Quan Nguyen, Arghavan Modiri, Roman Garnett", "author": "Quan Nguyen; Arghavan Modiri; Roman Garnett", "abstract": "Active search is a learning paradigm where we seek to identify as many members of a rare, valuable class as possible given a labeling budget. Previous work on active search has assumed access to a faithful (and expensive) oracle reporting experimental results. However, some settings offer access to cheaper surrogates such as computational simulation that may aid in the search. We propose a model of multifidelity active search, as well as a novel, computationally efficient policy for this setting that is motivated by state-of-the-art classical policies. Our policy is nonmyopic and budget aware, allowing for a dynamic tradeoff between exploration and exploitation. We evaluate the performance of our solution on real-world datasets and demonstrate significantly better performance than natural benchmarks.", "bibtex": "@InProceedings{pmlr-v139-nguyen21f,\n title = \t {Nonmyopic Multifidelity Acitve Search},\n author = {Nguyen, Quan and Modiri, Arghavan and Garnett, Roman},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8109--8118},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nguyen21f/nguyen21f.pdf},\n url = \t {https://proceedings.mlr.press/v139/nguyen21f.html},\n abstract = \t {Active search is a learning paradigm where we seek to identify as many members of a rare, valuable class as possible given a labeling budget. Previous work on active search has assumed access to a faithful (and expensive) oracle reporting experimental results. However, some settings offer access to cheaper surrogates such as computational simulation that may aid in the search. We propose a model of multifidelity active search, as well as a novel, computationally efficient policy for this setting that is motivated by state-of-the-art classical policies. Our policy is nonmyopic and budget aware, allowing for a dynamic tradeoff between exploration and exploitation. 
We evaluate the performance of our solution on real-world datasets and demonstrate significantly better performance than natural benchmarks.}\n}", "pdf": "http://proceedings.mlr.press/v139/nguyen21f/nguyen21f.pdf", "supp": "", "pdf_size": 319123, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2191414215507326946&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Washington University in St. Louis, MO, USA; University of Toronto, Toronto, Canada; Washington University in St. Louis, MO, USA", "aff_domain": "wustl.edu; ; ", "email": "wustl.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/nguyen21f.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Washington University in St. Louis;University of Toronto", "aff_unique_dep": ";", "aff_unique_url": "https://wustl.edu;https://www.utoronto.ca", "aff_unique_abbr": "WUSTL;U of T", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "St. Louis;Toronto", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United States;Canada" }, { "title": "Nonparametric Decomposition of Sparse Tensors", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10017", "id": "10017", "proceeding": "http://proceedings.mlr.press/v139/tillinghast21a.html", "slides": "", "author_site": "Conor Tillinghast, Shandian Zhe", "author": "Conor Tillinghast; Shandian Zhe", "abstract": "Tensor decomposition is a powerful framework for multiway data analysis. Despite the success of existing approaches, they ignore the sparse nature of the tensor data in many real-world applications, explicitly or implicitly assuming dense tensors. To address this model misspecification and to exploit the sparse tensor structures, we propose Nonparametric dEcomposition of Sparse Tensors (\\ours), which can capture both the sparse structure properties and complex relationships between the tensor nodes to enhance the embedding estimation. Specifically, we first use completely random measures to construct tensor-valued random processes. We prove that the entry growth is much slower than that of the corresponding tensor size, which implies sparsity. Given finite observations (\\ie projections), we then propose two nonparametric decomposition models that couple Dirichlet processes and Gaussian processes to jointly sample the sparse entry indices and the entry values (the latter as a nonlinear mapping of the embeddings), so as to encode both the structure properties and nonlinear relationships of the tensor nodes into the embeddings. Finally, we use the stick-breaking construction and random Fourier features to develop a scalable, stochastic variational learning algorithm. 
We show the advantage of our approach in sparse tensor generation, and entry index and value prediction in several real-world applications.", "bibtex": "@InProceedings{pmlr-v139-tillinghast21a,\n title = \t {Nonparametric Decomposition of Sparse Tensors},\n author = {Tillinghast, Conor and Zhe, Shandian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10301--10311},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tillinghast21a/tillinghast21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/tillinghast21a.html},\n abstract = \t {Tensor decomposition is a powerful framework for multiway data analysis. Despite the success of existing approaches, they ignore the sparse nature of the tensor data in many real-world applications, explicitly or implicitly assuming dense tensors. To address this model misspecification and to exploit the sparse tensor structures, we propose Nonparametric dEcomposition of Sparse Tensors (\\ours), which can capture both the sparse structure properties and complex relationships between the tensor nodes to enhance the embedding estimation. Specifically, we first use completely random measures to construct tensor-valued random processes. We prove that the entry growth is much slower than that of the corresponding tensor size, which implies sparsity. Given finite observations (\\ie projections), we then propose two nonparametric decomposition models that couple Dirichlet processes and Gaussian processes to jointly sample the sparse entry indices and the entry values (the latter as a nonlinear mapping of the embeddings), so as to encode both the structure properties and nonlinear relationships of the tensor nodes into the embeddings. Finally, we use the stick-breaking construction and random Fourier features to develop a scalable, stochastic variational learning algorithm. 
We show the advantage of our approach in sparse tensor generation, and entry index and value prediction in several real-world applications.}\n}", "pdf": "http://proceedings.mlr.press/v139/tillinghast21a/tillinghast21a.pdf", "supp": "", "pdf_size": 624256, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18126476011964001257&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "School of Computing, University of Utah; School of Computing, University of Utah", "aff_domain": "cs.utah.edu;cs.utah.edu", "email": "cs.utah.edu;cs.utah.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/tillinghast21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Utah", "aff_unique_dep": "School of Computing", "aff_unique_url": "https://www.utah.edu", "aff_unique_abbr": "U of U", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Utah", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Nonparametric Hamiltonian Monte Carlo", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8811", "id": "8811", "proceeding": "http://proceedings.mlr.press/v139/mak21a.html", "slides": "", "author_site": "Carol Mak, Fabian Zaiser, Luke Ong", "author": "Carol Mak; Fabian Zaiser; Luke Ong", "abstract": "Probabilistic programming uses programs to express generative models whose posterior probability is then computed by built-in inference engines. A challenging goal is to develop general purpose inference algorithms that work out-of-the-box for arbitrary programs in a universal probabilistic programming language (PPL). The densities defined by such programs, which may use stochastic branching and recursion, are (in general) nonparametric, in the sense that they correspond to models on an infinite-dimensional parameter space. However standard inference algorithms, such as the Hamiltonian Monte Carlo (HMC) algorithm, target distributions with a fixed number of parameters. This paper introduces the Nonparametric Hamiltonian Monte Carlo (NP-HMC) algorithm which generalises HMC to nonparametric models. Inputs to NP-HMC are a new class of measurable functions called \u201ctree representable\u201d, which serve as a language-independent representation of the density functions of probabilistic programs in a universal PPL. We provide a correctness proof of NP-HMC, and empirically demonstrate significant performance improvements over existing approaches on several nonparametric examples.", "bibtex": "@InProceedings{pmlr-v139-mak21a,\n title = \t {Nonparametric Hamiltonian Monte Carlo},\n author = {Mak, Carol and Zaiser, Fabian and Ong, Luke},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7336--7347},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/mak21a/mak21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/mak21a.html},\n abstract = \t {Probabilistic programming uses programs to express generative models whose posterior probability is then computed by built-in inference engines. A challenging goal is to develop general purpose inference algorithms that work out-of-the-box for arbitrary programs in a universal probabilistic programming language (PPL). 
The densities defined by such programs, which may use stochastic branching and recursion, are (in general) nonparametric, in the sense that they correspond to models on an infinite-dimensional parameter space. However standard inference algorithms, such as the Hamiltonian Monte Carlo (HMC) algorithm, target distributions with a fixed number of parameters. This paper introduces the Nonparametric Hamiltonian Monte Carlo (NP-HMC) algorithm which generalises HMC to nonparametric models. Inputs to NP-HMC are a new class of measurable functions called \u201ctree representable\u201d, which serve as a language-independent representation of the density functions of probabilistic programs in a universal PPL. We provide a correctness proof of NP-HMC, and empirically demonstrate significant performance improvements over existing approaches on several nonparametric examples.}\n}", "pdf": "http://proceedings.mlr.press/v139/mak21a/mak21a.pdf", "supp": "", "pdf_size": 2561979, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15980590487021793124&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science, University of Oxford, United Kingdom; Department of Computer Science, University of Oxford, United Kingdom; Department of Computer Science, University of Oxford, United Kingdom", "aff_domain": "cs.ox.ac.uk; ; ", "email": "cs.ox.ac.uk; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/mak21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Oxford", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ox.ac.uk", "aff_unique_abbr": "Oxford", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Oxford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Not All Memories are Created Equal: Learning to Forget by Expiring", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10741", "id": "10741", "proceeding": "http://proceedings.mlr.press/v139/sukhbaatar21a.html", "slides": "/media/icml-2021/Slides/10741.pdf", "author_site": "Sainbayar Sukhbaatar, Da JU, Spencer Poff, Stephen Roller, Arthur Szlam, Jason Weston, Angela Fan", "author": "Sainbayar Sukhbaatar; Da Ju; Spencer Poff; Stephen Roller; Arthur Szlam; Jason Weston; Angela Fan", "abstract": "Attention mechanisms have shown promising results in sequence modeling tasks that require long-term memory. Recent work investigated mechanisms to reduce the computational cost of preserving and storing memories. However, not all content in the past is equally important to remember. We propose Expire-Span, a method that learns to retain the most important information and expire the irrelevant information. This forgetting of memories enables Transformers to scale to attend over tens of thousands of previous timesteps efficiently, as not all states from previous timesteps are preserved. We demonstrate that Expire-Span can help models identify and retain critical information and show it can achieve strong performance on reinforcement learning tasks specifically designed to challenge this functionality. Next, we show that Expire-Span can scale to memories that are tens of thousands in size, setting a new state of the art on incredibly long context tasks such as character-level language modeling and a frame-by-frame moving objects task. 
Finally, we analyze the efficiency of Expire-Span compared to existing approaches and demonstrate that it trains faster and uses less memory.", "bibtex": "@InProceedings{pmlr-v139-sukhbaatar21a,\n title = \t {Not All Memories are Created Equal: Learning to Forget by Expiring},\n author = {Sukhbaatar, Sainbayar and Ju, Da and Poff, Spencer and Roller, Stephen and Szlam, Arthur and Weston, Jason and Fan, Angela},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9902--9912},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sukhbaatar21a/sukhbaatar21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/sukhbaatar21a.html},\n abstract = \t {Attention mechanisms have shown promising results in sequence modeling tasks that require long-term memory. Recent work investigated mechanisms to reduce the computational cost of preserving and storing memories. However, not all content in the past is equally important to remember. We propose Expire-Span, a method that learns to retain the most important information and expire the irrelevant information. This forgetting of memories enables Transformers to scale to attend over tens of thousands of previous timesteps efficiently, as not all states from previous timesteps are preserved. We demonstrate that Expire-Span can help models identify and retain critical information and show it can achieve strong performance on reinforcement learning tasks specifically designed to challenge this functionality. Next, we show that Expire-Span can scale to memories that are tens of thousands in size, setting a new state of the art on incredibly long context tasks such as character-level language modeling and a frame-by-frame moving objects task. 
Finally, we analyze the efficiency of Expire-Span compared to existing approaches and demonstrate that it trains faster and uses less memory.}\n}", "pdf": "http://proceedings.mlr.press/v139/sukhbaatar21a/sukhbaatar21a.pdf", "supp": "", "pdf_size": 1619371, "gs_citation": 44, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18323176449983399592&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Facebook AI Research; Facebook AI Research; Facebook AI Research; Facebook AI Research; Facebook AI Research; Facebook AI Research; Facebook AI Research + LORIA", "aff_domain": "fb.com; ; ; ; ; ;fb.com", "email": "fb.com; ; ; ; ; ;fb.com", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/sukhbaatar21a.html", "aff_unique_index": "0;0;0;0;0;0;0+1", "aff_unique_norm": "Meta;LORIA", "aff_unique_dep": "Facebook AI Research;", "aff_unique_url": "https://research.facebook.com;https://www.loria.fr", "aff_unique_abbr": "FAIR;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0;0+1", "aff_country_unique": "United States;France" }, { "title": "Object Segmentation Without Labels with Large-Scale Generative Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8667", "id": "8667", "proceeding": "http://proceedings.mlr.press/v139/voynov21a.html", "slides": "", "author_site": "Andrey Voynov, Stanislav Morozov, Artem Babenko", "author": "Andrey Voynov; Stanislav Morozov; Artem Babenko", "abstract": "The recent rise of unsupervised and self-supervised learning has dramatically reduced the dependency on labeled data, providing high-quality representations for transfer on downstream tasks. Furthermore, recent works also employed these representations in a fully unsupervised setup for image classification, reducing the need for human labels on the fine-tuning stage as well. This work demonstrates that large-scale unsupervised models can also perform a more challenging object segmentation task, requiring neither pixel-level nor image-level labeling. Namely, we show that recent unsupervised GANs allow to differentiate between foreground/background pixels, providing high-quality saliency masks. By extensive comparison on common benchmarks, we outperform existing unsupervised alternatives for object segmentation, achieving new state-of-the-art.", "bibtex": "@InProceedings{pmlr-v139-voynov21a,\n title = \t {Object Segmentation Without Labels with Large-Scale Generative Models},\n author = {Voynov, Andrey and Morozov, Stanislav and Babenko, Artem},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10596--10606},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/voynov21a/voynov21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/voynov21a.html},\n abstract = \t {The recent rise of unsupervised and self-supervised learning has dramatically reduced the dependency on labeled data, providing high-quality representations for transfer on downstream tasks. Furthermore, recent works also employed these representations in a fully unsupervised setup for image classification, reducing the need for human labels on the fine-tuning stage as well. 
This work demonstrates that large-scale unsupervised models can also perform a more challenging object segmentation task, requiring neither pixel-level nor image-level labeling. Namely, we show that recent unsupervised GANs allow to differentiate between foreground/background pixels, providing high-quality saliency masks. By extensive comparison on common benchmarks, we outperform existing unsupervised alternatives for object segmentation, achieving new state-of-the-art.}\n}", "pdf": "http://proceedings.mlr.press/v139/voynov21a/voynov21a.pdf", "supp": "", "pdf_size": 4073594, "gs_citation": 63, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7466808437204273550&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Yandex, Moscow, Russia; Yandex, Moscow, Russia; Yandex, Moscow, Russia", "aff_domain": "yandex.ru; ; ", "email": "yandex.ru; ; ", "github": "https://github.com/anvoynov/BigGANsAreWatching", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/voynov21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Yandex", "aff_unique_dep": "", "aff_unique_url": "https://yandex.com", "aff_unique_abbr": "Yandex", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Moscow", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Russian Federation" }, { "title": "Objective Bound Conditional Gaussian Process for Bayesian Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10235", "id": "10235", "proceeding": "http://proceedings.mlr.press/v139/jeong21a.html", "slides": "/media/icml-2021/Slides/10235.pdf", "author_site": "Taewon Jeong, Heeyoung Kim", "author": "Taewon Jeong; Heeyoung Kim", "abstract": "A Gaussian process is a standard surrogate model for an unknown objective function in Bayesian optimization. In this paper, we propose a new surrogate model, called the objective bound conditional Gaussian process (OBCGP), to condition a Gaussian process on a bound on the optimal function value. The bound is obtained and updated as the best observed value during the sequential optimization procedure. Unlike the standard Gaussian process, the OBCGP explicitly incorporates the existence of a point that improves the best known bound. We treat the location of such a point as a model parameter and estimate it jointly with other parameters by maximizing the likelihood using variational inference. Within the standard Bayesian optimization framework, the OBCGP can be combined with various acquisition functions to select the next query point. In particular, we derive cumulative regret bounds for the OBCGP combined with the upper confidence bound acquisition algorithm. Furthermore, the OBCGP can inherently incorporate a new type of prior knowledge, i.e., the bounds on the optimum, if it is available. The incorporation of this type of prior knowledge into a surrogate model has not been studied previously. 
We demonstrate the effectiveness of the OBCGP through its application to Bayesian optimization tasks, such as the sequential design of experiments and hyperparameter optimization in neural networks.", "bibtex": "@InProceedings{pmlr-v139-jeong21a,\n title = \t {Objective Bound Conditional Gaussian Process for Bayesian Optimization},\n author = {Jeong, Taewon and Kim, Heeyoung},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4819--4828},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jeong21a/jeong21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jeong21a.html},\n abstract = \t {A Gaussian process is a standard surrogate model for an unknown objective function in Bayesian optimization. In this paper, we propose a new surrogate model, called the objective bound conditional Gaussian process (OBCGP), to condition a Gaussian process on a bound on the optimal function value. The bound is obtained and updated as the best observed value during the sequential optimization procedure. Unlike the standard Gaussian process, the OBCGP explicitly incorporates the existence of a point that improves the best known bound. We treat the location of such a point as a model parameter and estimate it jointly with other parameters by maximizing the likelihood using variational inference. Within the standard Bayesian optimization framework, the OBCGP can be combined with various acquisition functions to select the next query point. In particular, we derive cumulative regret bounds for the OBCGP combined with the upper confidence bound acquisition algorithm. Furthermore, the OBCGP can inherently incorporate a new type of prior knowledge, i.e., the bounds on the optimum, if it is available. The incorporation of this type of prior knowledge into a surrogate model has not been studied previously. 
We demonstrate the effectiveness of the OBCGP through its application to Bayesian optimization tasks, such as the sequential design of experiments and hyperparameter optimization in neural networks.}\n}", "pdf": "http://proceedings.mlr.press/v139/jeong21a/jeong21a.pdf", "supp": "", "pdf_size": 1309980, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6979211646907844900&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Industrial and Systems Engineering, KAIST, Daejeon, Republic of Korea; Department of Industrial and Systems Engineering, KAIST, Daejeon, Republic of Korea", "aff_domain": "kaist.ac.kr;kaist.ac.kr", "email": "kaist.ac.kr;kaist.ac.kr", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/jeong21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "KAIST", "aff_unique_dep": "Department of Industrial and Systems Engineering", "aff_unique_url": "https://www.kaist.ac.kr", "aff_unique_abbr": "KAIST", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Daejeon", "aff_country_unique_index": "0;0", "aff_country_unique": "South Korea" }, { "title": "Oblivious Sketching for Logistic Regression", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10263", "id": "10263", "proceeding": "http://proceedings.mlr.press/v139/munteanu21a.html", "slides": "", "author_site": "Alexander Munteanu, Simon Omlor, David Woodruff", "author": "Alexander Munteanu; Simon Omlor; David Woodruff", "abstract": "What guarantees are possible for solving logistic regression in one pass over a data stream? To answer this question, we present the first data oblivious sketch for logistic regression. Our sketch can be computed in input sparsity time over a turnstile data stream and reduces the size of a $d$-dimensional data set from $n$ to only $\\operatorname{poly}(\\mu d\\log n)$ weighted points, where $\\mu$ is a useful parameter which captures the complexity of compressing the data. Solving (weighted) logistic regression on the sketch gives an $O(\\log n)$-approximation to the original problem on the full data set. We also show how to obtain an $O(1)$-approximation with slight modifications. Our sketches are fast, simple, easy to implement, and our experiments demonstrate their practicality.", "bibtex": "@InProceedings{pmlr-v139-munteanu21a,\n title = \t {Oblivious Sketching for Logistic Regression},\n author = {Munteanu, Alexander and Omlor, Simon and Woodruff, David},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7861--7871},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/munteanu21a/munteanu21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/munteanu21a.html},\n abstract = \t {What guarantees are possible for solving logistic regression in one pass over a data stream? To answer this question, we present the first data oblivious sketch for logistic regression. Our sketch can be computed in input sparsity time over a turnstile data stream and reduces the size of a $d$-dimensional data set from $n$ to only $\\operatorname{poly}(\\mu d\\log n)$ weighted points, where $\\mu$ is a useful parameter which captures the complexity of compressing the data. 
Solving (weighted) logistic regression on the sketch gives an $O(\\log n)$-approximation to the original problem on the full data set. We also show how to obtain an $O(1)$-approximation with slight modifications. Our sketches are fast, simple, easy to implement, and our experiments demonstrate their practicality.}\n}", "pdf": "http://proceedings.mlr.press/v139/munteanu21a/munteanu21a.pdf", "supp": "", "pdf_size": 506125, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16316892732322711108&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Dortmund Data Science Center, Faculties of Statistics and Computer Science, TU Dortmund University, Dortmund, Germany+Faculty of Statistics, TU Dortmund University, Dortmund, Germany; Faculty of Statistics, TU Dortmund University, Dortmund, Germany; Department of Computer Science, Carnegie Mellon University, Pittsburgh, PA, USA", "aff_domain": "tu-dortmund.de;tu-dortmund.de;cs.cmu.edu", "email": "tu-dortmund.de;tu-dortmund.de;cs.cmu.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/munteanu21a.html", "aff_unique_index": "0+0;0;1", "aff_unique_norm": "TU Dortmund University;Carnegie Mellon University", "aff_unique_dep": "Faculties of Statistics and Computer Science;Department of Computer Science", "aff_unique_url": "https://www.tu-dortmund.de;https://www.cmu.edu", "aff_unique_abbr": "TU Dortmund;CMU", "aff_campus_unique_index": "0+0;0;1", "aff_campus_unique": "Dortmund;Pittsburgh", "aff_country_unique_index": "0+0;0;1", "aff_country_unique": "Germany;United States" }, { "title": "Oblivious Sketching-based Central Path Method for Linear Programming", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10621", "id": "10621", "proceeding": "http://proceedings.mlr.press/v139/song21e.html", "slides": "", "author_site": "Zhao Song, Zheng Yu", "author": "Zhao Song; Zheng Yu", "abstract": "In this work, we propose a sketching-based central path method for solving linear programmings, whose running time matches the state of the art results [Cohen, Lee, Song STOC 19; Lee, Song, Zhang COLT 19]. Our method opens up the iterations of the central path method and deploys an \"iterate and sketch\" approach towards the problem by introducing a new coordinate-wise embedding technique, which may be of independent interest. Compare to previous methods, the work [Cohen, Lee, Song STOC 19] enjoys feasibility while being non-oblivious, and [Lee, Song, Zhang COLT 19] is oblivious but infeasible, and relies on $\\mathit{dense}$ sketching matrices such as subsampled randomized Hadamard/Fourier transform matrices. Our method enjoys the benefits of being both oblivious and feasible, and can use $\\mathit{sparse}$ sketching matrix [Nelson, Nguyen FOCS 13] to speed up the online matrix-vector multiplication. 
Our framework for solving LP naturally generalizes to a broader class of convex optimization problems including empirical risk minimization.", "bibtex": "@InProceedings{pmlr-v139-song21e,\n title = \t {Oblivious Sketching-based Central Path Method for Linear Programming},\n author = {Song, Zhao and Yu, Zheng},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9835--9847},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/song21e/song21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/song21e.html},\n abstract = \t {In this work, we propose a sketching-based central path method for solving linear programmings, whose running time matches the state of the art results [Cohen, Lee, Song STOC 19; Lee, Song, Zhang COLT 19]. Our method opens up the iterations of the central path method and deploys an \"iterate and sketch\" approach towards the problem by introducing a new coordinate-wise embedding technique, which may be of independent interest. Compare to previous methods, the work [Cohen, Lee, Song STOC 19] enjoys feasibility while being non-oblivious, and [Lee, Song, Zhang COLT 19] is oblivious but infeasible, and relies on $\\mathit{dense}$ sketching matrices such as subsampled randomized Hadamard/Fourier transform matrices. Our method enjoys the benefits of being both oblivious and feasible, and can use $\\mathit{sparse}$ sketching matrix [Nelson, Nguyen FOCS 13] to speed up the online matrix-vector multiplication. Our framework for solving LP naturally generalizes to a broader class of convex optimization problems including empirical risk minimization.}\n}", "pdf": "http://proceedings.mlr.press/v139/song21e/song21e.pdf", "supp": "", "pdf_size": 447416, "gs_citation": 42, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14820189579208901847&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "School of Mathematics, Institute for Advanced Study, United States; Department of Operations Research and Financial Engineering, Princeton University, United States", "aff_domain": "gmail.com;princeton.edu", "email": "gmail.com;princeton.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/song21e.html", "aff_unique_index": "0;1", "aff_unique_norm": "Institute for Advanced Study;Princeton University", "aff_unique_dep": "School of Mathematics;Department of Operations Research and Financial Engineering", "aff_unique_url": "https://www.ias.edu;https://www.princeton.edu", "aff_unique_abbr": "IAS;Princeton", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Of Moments and Matching: A Game-Theoretic Framework for Closing the Imitation Gap", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9401", "id": "9401", "proceeding": "http://proceedings.mlr.press/v139/swamy21a.html", "slides": "/media/icml-2021/Slides/9401.pdf", "author_site": "Gokul Swamy, Sanjiban Choudhury, J. Bagnell, Steven Wu", "author": "Gokul Swamy; Sanjiban Choudhury; J. Andrew Bagnell; Steven Wu", "abstract": "We provide a unifying view of a large family of previous imitation learning algorithms through the lens of moment matching. 
At its core, our classification scheme is based on whether the learner attempts to match (1) reward or (2) action-value moments of the expert\u2019s behavior, with each option leading to differing algorithmic approaches. By considering adversarially chosen divergences between learner and expert behavior, we are able to derive bounds on policy performance that apply for all algorithms in each of these classes, the first to our knowledge. We also introduce the notion of moment recoverability, implicit in many previous analyses of imitation learning, which allows us to cleanly delineate how well each algorithmic family is able to mitigate compounding errors. We derive three novel algorithm templates (AdVIL, AdRIL, and DAeQuIL) with strong guarantees, simple implementation, and competitive empirical performance.", "bibtex": "@InProceedings{pmlr-v139-swamy21a,\n title = \t {Of Moments and Matching: A Game-Theoretic Framework for Closing the Imitation Gap},\n author = {Swamy, Gokul and Choudhury, Sanjiban and Bagnell, J. Andrew and Wu, Steven},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10022--10032},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/swamy21a/swamy21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/swamy21a.html},\n abstract = \t {We provide a unifying view of a large family of previous imitation learning algorithms through the lens of moment matching. At its core, our classification scheme is based on whether the learner attempts to match (1) reward or (2) action-value moments of the expert\u2019s behavior, with each option leading to differing algorithmic approaches. By considering adversarially chosen divergences between learner and expert behavior, we are able to derive bounds on policy performance that apply for all algorithms in each of these classes, the first to our knowledge. We also introduce the notion of moment recoverability, implicit in many previous analyses of imitation learning, which allows us to cleanly delineate how well each algorithmic family is able to mitigate compounding errors. 
We derive three novel algorithm templates (AdVIL, AdRIL, and DAeQuIL) with strong guarantees, simple implementation, and competitive empirical performance.}\n}", "pdf": "http://proceedings.mlr.press/v139/swamy21a/swamy21a.pdf", "supp": "", "pdf_size": 785280, "gs_citation": 87, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7938694148424637226&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Robotics Institute, Carnegie Mellon University; Aurora Innovation; Robotics Institute, Carnegie Mellon University + Institute for Software Research, Carnegie Mellon University; Institute for Software Research, Carnegie Mellon University", "aff_domain": "cmu.edu; ; ; ", "email": "cmu.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/swamy21a.html", "aff_unique_index": "0;1;0+0;0", "aff_unique_norm": "Carnegie Mellon University;Aurora Innovation", "aff_unique_dep": "Robotics Institute;", "aff_unique_url": "https://www.cmu.edu;https://aurora.tech", "aff_unique_abbr": "CMU;Aurora", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Pittsburgh;", "aff_country_unique_index": "0;0;0+0;0", "aff_country_unique": "United States" }, { "title": "Off-Belief Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9801", "id": "9801", "proceeding": "http://proceedings.mlr.press/v139/hu21c.html", "slides": "", "author_site": "Hengyuan Hu, Adam Lerer, Brandon Cui, Luis Pineda, Noam Brown, Jakob Foerster", "author": "Hengyuan Hu; Adam Lerer; Brandon Cui; Luis Pineda; Noam Brown; Jakob Foerster", "abstract": "The standard problem setting in Dec-POMDPs is self-play, where the goal is to find a set of policies that play optimally together. Policies learned through self-play may adopt arbitrary conventions and implicitly rely on multi-step reasoning based on fragile assumptions about other agents\u2019 actions and thus fail when paired with humans or independently trained agents at test time. To address this, we present off-belief learning (OBL). At each timestep OBL agents follow a policy $\\pi_1$ that is optimized assuming past actions were taken by a given, fixed policy ($\\pi_0$), but assuming that future actions will be taken by $\\pi_1$. When $\\pi_0$ is uniform random, OBL converges to an optimal policy that does not rely on inferences based on other agents\u2019 behavior (an optimal grounded policy). OBL can be iterated in a hierarchy, where the optimal policy from one level becomes the input to the next, thereby introducing multi-level cognitive reasoning in a controlled manner. Unlike existing approaches, which may converge to any equilibrium policy, OBL converges to a unique policy, making it suitable for zero-shot coordination (ZSC). 
OBL can be scaled to high-dimensional settings with a fictitious transition mechanism and shows strong performance in both a toy-setting and the benchmark human-AI & ZSC problem Hanabi.", "bibtex": "@InProceedings{pmlr-v139-hu21c,\n title = \t {Off-Belief Learning},\n author = {Hu, Hengyuan and Lerer, Adam and Cui, Brandon and Pineda, Luis and Brown, Noam and Foerster, Jakob},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4369--4379},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hu21c/hu21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/hu21c.html},\n abstract = \t {The standard problem setting in Dec-POMDPs is self-play, where the goal is to find a set of policies that play optimally together. Policies learned through self-play may adopt arbitrary conventions and implicitly rely on multi-step reasoning based on fragile assumptions about other agents\u2019 actions and thus fail when paired with humans or independently trained agents at test time. To address this, we present off-belief learning (OBL). At each timestep OBL agents follow a policy $\\pi_1$ that is optimized assuming past actions were taken by a given, fixed policy ($\\pi_0$), but assuming that future actions will be taken by $\\pi_1$. When $\\pi_0$ is uniform random, OBL converges to an optimal policy that does not rely on inferences based on other agents\u2019 behavior (an optimal grounded policy). OBL can be iterated in a hierarchy, where the optimal policy from one level becomes the input to the next, thereby introducing multi-level cognitive reasoning in a controlled manner. Unlike existing approaches, which may converge to any equilibrium policy, OBL converges to a unique policy, making it suitable for zero-shot coordination (ZSC). 
OBL can be scaled to high-dimensional settings with a fictitious transition mechanism and shows strong performance in both a toy-setting and the benchmark human-AI & ZSC problem Hanabi.}\n}", "pdf": "http://proceedings.mlr.press/v139/hu21c/hu21c.pdf", "supp": "", "pdf_size": 1067201, "gs_citation": 79, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9880359834919449179&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 4, "aff": "Facebook AI Research; Facebook AI Research; Facebook AI Research; Facebook AI Research; Facebook AI Research; Facebook AI Research", "aff_domain": "fb.com; ; ; ; ;fb.com", "email": "fb.com; ; ; ; ;fb.com", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/hu21c.html", "aff_unique_index": "0;0;0;0;0;0", "aff_unique_norm": "Meta", "aff_unique_dep": "Facebook AI Research", "aff_unique_url": "https://research.facebook.com", "aff_unique_abbr": "FAIR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Off-Policy Confidence Sequences", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10471", "id": "10471", "proceeding": "http://proceedings.mlr.press/v139/karampatziakis21a.html", "slides": "/media/icml-2021/Slides/10471.pdf", "author_site": "Nikos Karampatziakis, Paul Mineiro, Aaditya Ramdas", "author": "Nikos Karampatziakis; Paul Mineiro; Aaditya Ramdas", "abstract": "We develop confidence bounds that hold uniformly over time for off-policy evaluation in the contextual bandit setting. These confidence sequences are based on recent ideas from martingale analysis and are non-asymptotic, non-parametric, and valid at arbitrary stopping times. We provide algorithms for computing these confidence sequences that strike a good balance between computational and statistical efficiency. We empirically demonstrate the tightness of our approach in terms of failure probability and width and apply it to the \u201cgated deployment\u201d problem of safely upgrading a production contextual bandit system.", "bibtex": "@InProceedings{pmlr-v139-karampatziakis21a,\n title = \t {Off-Policy Confidence Sequences},\n author = {Karampatziakis, Nikos and Mineiro, Paul and Ramdas, Aaditya},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5301--5310},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/karampatziakis21a/karampatziakis21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/karampatziakis21a.html},\n abstract = \t {We develop confidence bounds that hold uniformly over time for off-policy evaluation in the contextual bandit setting. These confidence sequences are based on recent ideas from martingale analysis and are non-asymptotic, non-parametric, and valid at arbitrary stopping times. We provide algorithms for computing these confidence sequences that strike a good balance between computational and statistical efficiency. 
We empirically demonstrate the tightness of our approach in terms of failure probability and width and apply it to the \u201cgated deployment\u201d problem of safely upgrading a production contextual bandit system.}\n}", "pdf": "http://proceedings.mlr.press/v139/karampatziakis21a/karampatziakis21a.pdf", "supp": "", "pdf_size": 696727, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13669553715034800409&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Microsoft Azure AI; Microsoft Research; Carnegie Mellon University", "aff_domain": "microsoft.com; ; ", "email": "microsoft.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/karampatziakis21a.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "Microsoft;Carnegie Mellon University", "aff_unique_dep": "Azure AI;", "aff_unique_url": "https://azure.microsoft.com/en-us/ai;https://www.cmu.edu", "aff_unique_abbr": "Microsoft Azure AI;CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Offline Contextual Bandits with Overparameterized Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10477", "id": "10477", "proceeding": "http://proceedings.mlr.press/v139/brandfonbrener21a.html", "slides": "", "author_site": "David Brandfonbrener, William Whitney, Rajesh Ranganath, Joan Bruna", "author": "David Brandfonbrener; William Whitney; Rajesh Ranganath; Joan Bruna", "abstract": "Recent results in supervised learning suggest that while overparameterized models have the capacity to overfit, they in fact generalize quite well. We ask whether the same phenomenon occurs for offline contextual bandits. Our results are mixed. Value-based algorithms benefit from the same generalization behavior as overparameterized supervised learning, but policy-based algorithms do not. We show that this discrepancy is due to the \\emph{action-stability} of their objectives. An objective is action-stable if there exists a prediction (action-value vector or action distribution) which is optimal no matter which action is observed. While value-based objectives are action-stable, policy-based objectives are unstable. We formally prove upper bounds on the regret of overparameterized value-based learning and lower bounds on the regret for policy-based algorithms. In our experiments with large neural networks, this gap between action-stable value-based objectives and unstable policy-based objectives leads to significant performance differences.", "bibtex": "@InProceedings{pmlr-v139-brandfonbrener21a,\n title = \t {Offline Contextual Bandits with Overparameterized Models},\n author = {Brandfonbrener, David and Whitney, William and Ranganath, Rajesh and Bruna, Joan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1049--1058},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/brandfonbrener21a/brandfonbrener21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/brandfonbrener21a.html},\n abstract = \t {Recent results in supervised learning suggest that while overparameterized models have the capacity to overfit, they in fact generalize quite well. 
We ask whether the same phenomenon occurs for offline contextual bandits. Our results are mixed. Value-based algorithms benefit from the same generalization behavior as overparameterized supervised learning, but policy-based algorithms do not. We show that this discrepancy is due to the \\emph{action-stability} of their objectives. An objective is action-stable if there exists a prediction (action-value vector or action distribution) which is optimal no matter which action is observed. While value-based objectives are action-stable, policy-based objectives are unstable. We formally prove upper bounds on the regret of overparameterized value-based learning and lower bounds on the regret for policy-based algorithms. In our experiments with large neural networks, this gap between action-stable value-based objectives and unstable policy-based objectives leads to significant performance differences.}\n}", "pdf": "http://proceedings.mlr.press/v139/brandfonbrener21a/brandfonbrener21a.pdf", "supp": "", "pdf_size": 613685, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11852183431002924037&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Courant Institute of Mathematical Sciences, New York University, New York, New York, USA; Courant Institute of Mathematical Sciences, New York University, New York, New York, USA; Courant Institute of Mathematical Sciences, New York University, New York, New York, USA; Courant Institute of Mathematical Sciences, New York University, New York, New York, USA", "aff_domain": "nyu.edu; ; ; ", "email": "nyu.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/brandfonbrener21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "New York University", "aff_unique_dep": "Courant Institute of Mathematical Sciences", "aff_unique_url": "https://www.nyu.edu", "aff_unique_abbr": "NYU", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "New York", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Offline Meta-Reinforcement Learning with Advantage Weighting", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8719", "id": "8719", "proceeding": "http://proceedings.mlr.press/v139/mitchell21a.html", "slides": "", "author_site": "Eric Mitchell, Rafael Rafailov, Xue Bin Peng, Sergey Levine, Chelsea Finn", "author": "Eric Mitchell; Rafael Rafailov; Xue Bin Peng; Sergey Levine; Chelsea Finn", "abstract": "This paper introduces the offline meta-reinforcement learning (offline meta-RL) problem setting and proposes an algorithm that performs well in this setting. Offline meta-RL is analogous to the widely successful supervised learning strategy of pre-training a model on a large batch of fixed, pre-collected data (possibly from various tasks) and fine-tuning the model to a new task with relatively little data. That is, in offline meta-RL, we meta-train on fixed, pre-collected data from several tasks and adapt to a new task with a very small amount (less than 5 trajectories) of data from the new task. By nature of being offline, algorithms for offline meta-RL can utilize the largest possible pool of training data available and eliminate potentially unsafe or costly data collection during meta-training. 
This setting inherits the challenges of offline RL, but it differs significantly because offline RL does not generally consider a) transfer to new tasks or b) limited data from the test task, both of which we face in offline meta-RL. Targeting the offline meta-RL setting, we propose Meta-Actor Critic with Advantage Weighting (MACAW). MACAW is an optimization-based meta-learning algorithm that uses simple, supervised regression objectives for both the inner and outer loop of meta-training. On offline variants of common meta-RL benchmarks, we empirically find that this approach enables fully offline meta-reinforcement learning and achieves notable gains over prior methods.", "bibtex": "@InProceedings{pmlr-v139-mitchell21a,\n title = \t {Offline Meta-Reinforcement Learning with Advantage Weighting},\n author = {Mitchell, Eric and Rafailov, Rafael and Peng, Xue Bin and Levine, Sergey and Finn, Chelsea},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7780--7791},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/mitchell21a/mitchell21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/mitchell21a.html},\n abstract = \t {This paper introduces the offline meta-reinforcement learning (offline meta-RL) problem setting and proposes an algorithm that performs well in this setting. Offline meta-RL is analogous to the widely successful supervised learning strategy of pre-training a model on a large batch of fixed, pre-collected data (possibly from various tasks) and fine-tuning the model to a new task with relatively little data. That is, in offline meta-RL, we meta-train on fixed, pre-collected data from several tasks and adapt to a new task with a very small amount (less than 5 trajectories) of data from the new task. By nature of being offline, algorithms for offline meta-RL can utilize the largest possible pool of training data available and eliminate potentially unsafe or costly data collection during meta-training. This setting inherits the challenges of offline RL, but it differs significantly because offline RL does not generally consider a) transfer to new tasks or b) limited data from the test task, both of which we face in offline meta-RL. Targeting the offline meta-RL setting, we propose Meta-Actor Critic with Advantage Weighting (MACAW). MACAW is an optimization-based meta-learning algorithm that uses simple, supervised regression objectives for both the inner and outer loop of meta-training. 
On offline variants of common meta-RL benchmarks, we empirically find that this approach enables fully offline meta-reinforcement learning and achieves notable gains over prior methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/mitchell21a/mitchell21a.pdf", "supp": "", "pdf_size": 1328719, "gs_citation": 135, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17977945892617234025&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Stanford University; Stanford University; University of California, Berkeley; University of California, Berkeley; Stanford University", "aff_domain": "cs.stanford.edu; ; ; ; ", "email": "cs.stanford.edu; ; ; ; ", "github": "", "project": "https://sites.google.com/view/macaw-metarl", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/mitchell21a.html", "aff_unique_index": "0;0;1;1;0", "aff_unique_norm": "Stanford University;University of California, Berkeley", "aff_unique_dep": ";", "aff_unique_url": "https://www.stanford.edu;https://www.berkeley.edu", "aff_unique_abbr": "Stanford;UC Berkeley", "aff_campus_unique_index": "0;0;1;1;0", "aff_campus_unique": "Stanford;Berkeley", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Offline Reinforcement Learning with Fisher Divergence Critic Regularization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8977", "id": "8977", "proceeding": "http://proceedings.mlr.press/v139/kostrikov21a.html", "slides": "", "author_site": "Ilya Kostrikov, Rob Fergus, Jonathan Tompson, Ofir Nachum", "author": "Ilya Kostrikov; Rob Fergus; Jonathan Tompson; Ofir Nachum", "abstract": "Many modern approaches to offline Reinforcement Learning (RL) utilize behavior regularization, typically augmenting a model-free actor critic algorithm with a penalty measuring divergence of the policy from the offline data. In this work, we propose an alternative approach to encouraging the learned policy to stay close to the data, namely parameterizing the critic as the log-behavior-policy, which generated the offline data, plus a state-action value offset term, which can be learned using a neural network. Behavior regularization then corresponds to an appropriate regularizer on the offset term. We propose using a gradient penalty regularizer for the offset term and demonstrate its equivalence to Fisher divergence regularization, suggesting connections to the score matching and generative energy-based model literature. We thus term our resulting algorithm Fisher-BRC (Behavior Regularized Critic). 
On standard offline RL benchmarks, Fisher-BRC achieves both improved performance and faster convergence over existing state-of-the-art methods.", "bibtex": "@InProceedings{pmlr-v139-kostrikov21a,\n title = \t {Offline Reinforcement Learning with Fisher Divergence Critic Regularization},\n author = {Kostrikov, Ilya and Fergus, Rob and Tompson, Jonathan and Nachum, Ofir},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5774--5783},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kostrikov21a/kostrikov21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kostrikov21a.html},\n abstract = \t {Many modern approaches to offline Reinforcement Learning (RL) utilize behavior regularization, typically augmenting a model-free actor critic algorithm with a penalty measuring divergence of the policy from the offline data. In this work, we propose an alternative approach to encouraging the learned policy to stay close to the data, namely parameterizing the critic as the log-behavior-policy, which generated the offline data, plus a state-action value offset term, which can be learned using a neural network. Behavior regularization then corresponds to an appropriate regularizer on the offset term. We propose using a gradient penalty regularizer for the offset term and demonstrate its equivalence to Fisher divergence regularization, suggesting connections to the score matching and generative energy-based model literature. We thus term our resulting algorithm Fisher-BRC (Behavior Regularized Critic). On standard offline RL benchmarks, Fisher-BRC achieves both improved performance and faster convergence over existing state-of-the-art methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/kostrikov21a/kostrikov21a.pdf", "supp": "", "pdf_size": 1633455, "gs_citation": 364, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4410288794309638335&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "New York University, USA+Google Research, USA+Google DeepMind, USA; Google Research, USA; New York University, USA+Google DeepMind, USA; Google Research, USA", "aff_domain": "cs.nyu.edu; ; ; ", "email": "cs.nyu.edu; ; ; ", "github": "https://github.com/google-research/google-research/tree/master/fisher_brc", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/kostrikov21a.html", "aff_unique_index": "0+1+1;1;0+1;1", "aff_unique_norm": "New York University;Google", "aff_unique_dep": ";Google Research", "aff_unique_url": "https://www.nyu.edu;https://research.google", "aff_unique_abbr": "NYU;Google", "aff_campus_unique_index": "1;1;;1", "aff_campus_unique": ";Mountain View", "aff_country_unique_index": "0+0+0;0;0+0;0", "aff_country_unique": "United States" }, { "title": "Offline Reinforcement Learning with Pseudometric Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10353", "id": "10353", "proceeding": "http://proceedings.mlr.press/v139/dadashi21a.html", "slides": "", "author_site": "Robert Dadashi, Shideh Rezaeifar, Nino Vieillard, L\u00e9onard Hussenot, Olivier Pietquin, Matthieu Geist", "author": "Robert Dadashi; Shideh Rezaeifar; Nino Vieillard; L\u00e9onard Hussenot; Olivier Pietquin; Matthieu Geist", "abstract": "Offline Reinforcement Learning methods seek to learn a policy from 
logged transitions of an environment, without any interaction. In the presence of function approximation, and under the assumption of limited coverage of the state-action space of the environment, it is necessary to enforce the policy to visit state-action pairs close to the support of logged transitions. In this work, we propose an iterative procedure to learn a pseudometric (closely related to bisimulation metrics) from logged transitions, and use it to define this notion of closeness. We show its convergence and extend it to the function approximation setting. We then use this pseudometric to define a new lookup based bonus in an actor-critic algorithm: PLOFF. This bonus encourages the actor to stay close, in terms of the defined pseudometric, to the support of logged transitions. Finally, we evaluate the method on hand manipulation and locomotion tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/dadashi21a/dadashi21a.pdf", "supp": "", "pdf_size": 5553548, "gs_citation": 46, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5482862595467885690&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Google Research, Brain Team; University of Geneva; Universit\u00e9 de Lorraine, CNRS, Inria, IECL, F-54000 Nancy, France; Univ. 
de Lille, CNRS, Inria Scool, UMR 9189 CRIStAL; Google Research, Brain Team; Google Research, Brain Team", "aff_domain": "google.com; ; ; ; ; ", "email": "google.com; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/dadashi21a.html", "aff_unique_index": "0;1;2;3;0;0", "aff_unique_norm": "Google;University of Geneva;Universit\u00e9 de Lorraine;University of Lille", "aff_unique_dep": "Google Research;;;Inria Scool, UMR 9189 CRIStAL", "aff_unique_url": "https://research.google;https://www.unige.ch;https://www.univ-lorraine.fr;https://www.univ-lille.fr", "aff_unique_abbr": "Google;UNIGE;UL;Univ. de Lille", "aff_campus_unique_index": "0;2;0;0", "aff_campus_unique": "Mountain View;;Nancy", "aff_country_unique_index": "0;1;2;2;0;0", "aff_country_unique": "United States;Switzerland;France" }, { "title": "OmniNet: Omnidirectional Representations from Transformers", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9301", "id": "9301", "proceeding": "http://proceedings.mlr.press/v139/tay21b.html", "slides": "", "author_site": "Yi Tay, Mostafa Dehghani, Vamsi Aribandi, Jai Gupta, Philip Pham, Zhen Qin, Dara Bahri, Da-Cheng Juan, Don Metzler", "author": "Yi Tay; Mostafa Dehghani; Vamsi Aribandi; Jai Gupta; Philip M Pham; Zhen Qin; Dara Bahri; Da-Cheng Juan; Donald Metzler", "abstract": "This paper proposes Omnidirectional Representations from Transformers (OMNINET). In OmniNet, instead of maintaining a strictly horizon-tal receptive field, each token is allowed to attend to all tokens in the entire network. This process can also be interpreted as a form of extreme or intensive attention mechanism that has the receptive field of the entire width and depth of the network. To this end, the omnidirectional attention is learned via a meta-learner, which is essentially another self-attention based model. In order to mitigate the computationally expensive costs of full receptive field attention, we leverage efficient self-attention models such as kernel-based, low-rank attention and/or Big Bird as the meta-learner. Extensive experiments are conducted on autoregressive language modeling(LM1B, C4), Machine Translation, Long Range Arena (LRA), and Image Recognition.The experiments show that OmniNet achieves considerable improvements across these tasks, including achieving state-of-the-art performance on LM1B,WMT\u201914 En-De/En-Fr, and Long Range Arena.Moreover, using omnidirectional representation in Vision Transformers leads to significant improvements on image recognition tasks on both few-shot learning and fine-tuning setups.", "bibtex": "@InProceedings{pmlr-v139-tay21b,\n title = \t {OmniNet: Omnidirectional Representations from Transformers},\n author = {Tay, Yi and Dehghani, Mostafa and Aribandi, Vamsi and Gupta, Jai and Pham, Philip M and Qin, Zhen and Bahri, Dara and Juan, Da-Cheng and Metzler, Donald},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10193--10202},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tay21b/tay21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/tay21b.html},\n abstract = \t {This paper proposes Omnidirectional Representations from Transformers (OMNINET). 
In OmniNet, instead of maintaining a strictly horizon-tal receptive field, each token is allowed to attend to all tokens in the entire network. This process can also be interpreted as a form of extreme or intensive attention mechanism that has the receptive field of the entire width and depth of the network. To this end, the omnidirectional attention is learned via a meta-learner, which is essentially another self-attention based model. In order to mitigate the computationally expensive costs of full receptive field attention, we leverage efficient self-attention models such as kernel-based, low-rank attention and/or Big Bird as the meta-learner. Extensive experiments are conducted on autoregressive language modeling(LM1B, C4), Machine Translation, Long Range Arena (LRA), and Image Recognition.The experiments show that OmniNet achieves considerable improvements across these tasks, including achieving state-of-the-art performance on LM1B,WMT\u201914 En-De/En-Fr, and Long Range Arena.Moreover, using omnidirectional representation in Vision Transformers leads to significant improvements on image recognition tasks on both few-shot learning and fine-tuning setups.}\n}", "pdf": "http://proceedings.mlr.press/v139/tay21b/tay21b.pdf", "supp": "", "pdf_size": 2324291, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17083877326564507670&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Google Research, Mountain View; Google Brain Team, Amsterdam; Google Research, Mountain View; Google Research, Mountain View; Google Research, Mountain View; Google Research, Mountain View; Google Research, Mountain View; Google Research, Mountain View; Google Research, Mountain View", "aff_domain": "google.com;google.com; ; ; ; ; ; ; ", "email": "google.com;google.com; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 9, "oa": "https://proceedings.mlr.press/v139/tay21b.html", "aff_unique_index": "0;0;0;0;0;0;0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google", "aff_campus_unique_index": "0;1;0;0;0;0;0;0;0", "aff_campus_unique": "Mountain View;Amsterdam", "aff_country_unique_index": "0;1;0;0;0;0;0;0;0", "aff_country_unique": "United States;Netherlands" }, { "title": "On Characterizing GAN Convergence Through Proximal Duality Gap", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8559", "id": "8559", "proceeding": "http://proceedings.mlr.press/v139/sidheekh21a.html", "slides": "/media/icml-2021/Slides/8559.pdf", "author_site": "Sahil Sidheekh, Aroof Aimen, Narayanan Chatapuram Krishnan", "author": "Sahil Sidheekh; Aroof Aimen; Narayanan C Krishnan", "abstract": "Despite the accomplishments of Generative Adversarial Networks (GANs) in modeling data distributions, training them remains a challenging task. A contributing factor to this difficulty is the non-intuitive nature of the GAN loss curves, which necessitates a subjective evaluation of the generated output to infer training progress. Recently, motivated by game theory, Duality Gap has been proposed as a domain agnostic measure to monitor GAN training. However, it is restricted to the setting when the GAN converges to a Nash equilibrium. But GANs need not always converge to a Nash equilibrium to model the data distribution. 
In this work, we extend the notion of duality gap to proximal duality gap that is applicable to the general context of training GANs where Nash equilibria may not exist. We show theoretically that the proximal duality gap can monitor the convergence of GANs to a broader spectrum of equilibria that subsumes Nash equilibria. We also theoretically establish the relationship between the proximal duality gap and the divergence between the real and generated data distributions for different GAN formulations. Our results provide new insights into the nature of GAN convergence. Finally, we validate experimentally the usefulness of proximal duality gap for monitoring and influencing GAN training.", "bibtex": "@InProceedings{pmlr-v139-sidheekh21a,\n title = \t {On Characterizing GAN Convergence Through Proximal Duality Gap},\n author = {Sidheekh, Sahil and Aimen, Aroof and Krishnan, Narayanan C},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9660--9670},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sidheekh21a/sidheekh21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/sidheekh21a.html},\n abstract = \t {Despite the accomplishments of Generative Adversarial Networks (GANs) in modeling data distributions, training them remains a challenging task. A contributing factor to this difficulty is the non-intuitive nature of the GAN loss curves, which necessitates a subjective evaluation of the generated output to infer training progress. Recently, motivated by game theory, Duality Gap has been proposed as a domain agnostic measure to monitor GAN training. However, it is restricted to the setting when the GAN converges to a Nash equilibrium. But GANs need not always converge to a Nash equilibrium to model the data distribution. In this work, we extend the notion of duality gap to proximal duality gap that is applicable to the general context of training GANs where Nash equilibria may not exist. We show theoretically that the proximal duality gap can monitor the convergence of GANs to a broader spectrum of equilibria that subsumes Nash equilibria. We also theoretically establish the relationship between the proximal duality gap and the divergence between the real and generated data distributions for different GAN formulations. Our results provide new insights into the nature of GAN convergence. 
Finally, we validate experimentally the usefulness of proximal duality gap for monitoring and influencing GAN training.}\n}", "pdf": "http://proceedings.mlr.press/v139/sidheekh21a/sidheekh21a.pdf", "supp": "", "pdf_size": 715644, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16988175738385537443&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science, Indian Institute of Technology, Ropar, India; Department of Computer Science, Indian Institute of Technology, Ropar, India; Department of Computer Science, Indian Institute of Technology, Ropar, India", "aff_domain": "iitrpr.ac.in; ;iitrpr.ac.in", "email": "iitrpr.ac.in; ;iitrpr.ac.in", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/sidheekh21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Indian Institute of Technology, Ropar", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://iitrpr.ac.in", "aff_unique_abbr": "IIT Ropar", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Ropar", "aff_country_unique_index": "0;0;0", "aff_country_unique": "India" }, { "title": "On Disentangled Representations Learned from Correlated Data", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9351", "id": "9351", "proceeding": "http://proceedings.mlr.press/v139/trauble21a.html", "slides": "", "author_site": "Frederik Tr\u00e4uble, Elliot Creager, Niki Kilbertus, Francesco Locatello, Andrea Dittadi, Anirudh Goyal, Bernhard Sch\u00f6lkopf, Stefan Bauer", "author": "Frederik Tr\u00e4uble; Elliot Creager; Niki Kilbertus; Francesco Locatello; Andrea Dittadi; Anirudh Goyal; Bernhard Sch\u00f6lkopf; Stefan Bauer", "abstract": "The focus of disentanglement approaches has been on identifying independent factors of variation in data. However, the causal variables underlying real-world observations are often not statistically independent. In this work, we bridge the gap to real-world scenarios by analyzing the behavior of the most prominent disentanglement approaches on correlated data in a large-scale empirical study (including 4260 models). We show and quantify that systematically induced correlations in the dataset are being learned and reflected in the latent representations, which has implications for downstream applications of disentanglement such as fairness. We also demonstrate how to resolve these latent correlations, either using weak supervision during training or by post-hoc correcting a pre-trained model with a small number of labels.", "bibtex": "@InProceedings{pmlr-v139-trauble21a,\n title = \t {On Disentangled Representations Learned from Correlated Data},\n author = {Tr{\\\"a}uble, Frederik and Creager, Elliot and Kilbertus, Niki and Locatello, Francesco and Dittadi, Andrea and Goyal, Anirudh and Sch{\\\"o}lkopf, Bernhard and Bauer, Stefan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10401--10412},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/trauble21a/trauble21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/trauble21a.html},\n abstract = \t {The focus of disentanglement approaches has been on identifying independent factors of variation in data. 
However, the causal variables underlying real-world observations are often not statistically independent. In this work, we bridge the gap to real-world scenarios by analyzing the behavior of the most prominent disentanglement approaches on correlated data in a large-scale empirical study (including 4260 models). We show and quantify that systematically induced correlations in the dataset are being learned and reflected in the latent representations, which has implications for downstream applications of disentanglement such as fairness. We also demonstrate how to resolve these latent correlations, either using weak supervision during training or by post-hoc correcting a pre-trained model with a small number of labels.}\n}", "pdf": "http://proceedings.mlr.press/v139/trauble21a/trauble21a.pdf", "supp": "", "pdf_size": 2432165, "gs_citation": 142, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10644866140945749570&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": ";;;;;;;", "aff_domain": ";;;;;;;", "email": ";;;;;;;", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/trauble21a.html" }, { "title": "On Energy-Based Models with Overparametrized Shallow Neural Networks", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10311", "id": "10311", "proceeding": "http://proceedings.mlr.press/v139/domingo-enrich21a.html", "slides": "/media/icml-2021/Slides/10311.pdf", "author_site": "Carles Domingo-Enrich, Alberto Bietti, Eric Vanden-Eijnden, Joan Bruna", "author": "Carles Domingo-Enrich; Alberto Bietti; Eric Vanden-Eijnden; Joan Bruna", "abstract": "Energy-based models (EBMs) are a simple yet powerful framework for generative modeling. They are based on a trainable energy function which defines an associated Gibbs measure, and they can be trained and sampled from via well-established statistical tools, such as MCMC. Neural networks may be used as energy function approximators, providing both a rich class of expressive models as well as a flexible device to incorporate data structure. In this work we focus on shallow neural networks. Building from the incipient theory of overparametrized neural networks, we show that models trained in the so-called \u2019active\u2019 regime provide a statistical advantage over their associated \u2019lazy\u2019 or kernel regime, leading to improved adaptivity to hidden low-dimensional structure in the data distribution, as already observed in supervised learning. Our study covers both the maximum likelihood and Stein Discrepancy estimators, and we validate our theoretical results with numerical experiments on synthetic data.", "bibtex": "@InProceedings{pmlr-v139-domingo-enrich21a,\n title = \t {On Energy-Based Models with Overparametrized Shallow Neural Networks},\n author = {Domingo-Enrich, Carles and Bietti, Alberto and Vanden-Eijnden, Eric and Bruna, Joan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2771--2782},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/domingo-enrich21a/domingo-enrich21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/domingo-enrich21a.html},\n abstract = \t {Energy-based models (EBMs) are a simple yet powerful framework for generative modeling. 
They are based on a trainable energy function which defines an associated Gibbs measure, and they can be trained and sampled from via well-established statistical tools, such as MCMC. Neural networks may be used as energy function approximators, providing both a rich class of expressive models as well as a flexible device to incorporate data structure. In this work we focus on shallow neural networks. Building from the incipient theory of overparametrized neural networks, we show that models trained in the so-called \u2019active\u2019 regime provide a statistical advantage over their associated \u2019lazy\u2019 or kernel regime, leading to improved adaptivity to hidden low-dimensional structure in the data distribution, as already observed in supervised learning. Our study covers both the maximum likelihood and Stein Discrepancy estimators, and we validate our theoretical results with numerical experiments on synthetic data.}\n}", "pdf": "http://proceedings.mlr.press/v139/domingo-enrich21a/domingo-enrich21a.pdf", "supp": "", "pdf_size": 5325508, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2626488009584096909&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Courant Institute of Mathematical Sciences, New York University+Center for Data Science, New York University; Center for Data Science, New York University; Courant Institute of Mathematical Sciences, New York University; Courant Institute of Mathematical Sciences, New York University+Center for Data Science, New York University", "aff_domain": "nyu.edu; ; ; ", "email": "nyu.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/domingo-enrich21a.html", "aff_unique_index": "0+0;0;0;0+0", "aff_unique_norm": "New York University", "aff_unique_dep": "Courant Institute of Mathematical Sciences", "aff_unique_url": "https://www.courant.nyu.edu", "aff_unique_abbr": "NYU", "aff_campus_unique_index": "0+0;0;0;0+0", "aff_campus_unique": "New York", "aff_country_unique_index": "0+0;0;0;0+0", "aff_country_unique": "United States" }, { "title": "On Estimation in Latent Variable Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10071", "id": "10071", "proceeding": "http://proceedings.mlr.press/v139/fang21a.html", "slides": "", "author_site": "Guanhua Fang, Ping Li", "author": "Guanhua Fang; Ping Li", "abstract": "Latent variable models have been playing a central role in statistics, econometrics, machine learning with applications to repeated observation study, panel data inference, user behavior analysis, etc. In many modern applications, the inference based on latent variable models involves one or several of the following features: the presence of complex latent structure, the observed and latent variables being continuous or discrete, constraints on parameters, and data size being large. Therefore, solving an estimation problem for general latent variable models is highly non-trivial. In this paper, we consider a gradient based method via using variance reduction technique to accelerate estimation procedure. Theoretically, we show the convergence results for the proposed method under general and mild model assumptions. The algorithm has better computational complexity compared with the classical gradient methods and maintains nice statistical properties. 
Various numerical results corroborate our theory.", "bibtex": "@InProceedings{pmlr-v139-fang21a,\n title = \t {On Estimation in Latent Variable Models},\n author = {Fang, Guanhua and Li, Ping},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3100--3110},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fang21a/fang21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/fang21a.html},\n abstract = \t {Latent variable models have been playing a central role in statistics, econometrics, machine learning with applications to repeated observation study, panel data inference, user behavior analysis, etc. In many modern applications, the inference based on latent variable models involves one or several of the following features: the presence of complex latent structure, the observed and latent variables being continuous or discrete, constraints on parameters, and data size being large. Therefore, solving an estimation problem for general latent variable models is highly non-trivial. In this paper, we consider a gradient based method via using variance reduction technique to accelerate estimation procedure. Theoretically, we show the convergence results for the proposed method under general and mild model assumptions. The algorithm has better computational complexity compared with the classical gradient methods and maintains nice statistical properties. Various numerical results corroborate our theory.}\n}", "pdf": "http://proceedings.mlr.press/v139/fang21a/fang21a.pdf", "supp": "", "pdf_size": 563661, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8883749270183927378&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/fang21a.html" }, { "title": "On Explainability of Graph Neural Networks via Subgraph Explorations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8873", "id": "8873", "proceeding": "http://proceedings.mlr.press/v139/yuan21c.html", "slides": "", "author_site": "Hao Yuan, Haiyang Yu, Jie Wang, Kang Li, Shuiwang Ji", "author": "Hao Yuan; Haiyang Yu; Jie Wang; Kang Li; Shuiwang Ji", "abstract": "We consider the problem of explaining the predictions of graph neural networks (GNNs), which otherwise are considered as black boxes. Existing methods invariably focus on explaining the importance of graph nodes or edges but ignore the substructures of graphs, which are more intuitive and human-intelligible. In this work, we propose a novel method, known as SubgraphX, to explain GNNs by identifying important subgraphs. Given a trained GNN model and an input graph, our SubgraphX explains its predictions by efficiently exploring different subgraphs with Monte Carlo tree search. To make the tree search more effective, we propose to use Shapley values as a measure of subgraph importance, which can also capture the interactions among different subgraphs. To expedite computations, we propose efficient approximation schemes to compute Shapley values for graph data. Our work represents the first attempt to explain GNNs via identifying subgraphs explicitly and directly. 
Experimental results show that our SubgraphX achieves significantly improved explanations, while keeping computations at a reasonable level.", "bibtex": "@InProceedings{pmlr-v139-yuan21c,\n title = \t {On Explainability of Graph Neural Networks via Subgraph Explorations},\n author = {Yuan, Hao and Yu, Haiyang and Wang, Jie and Li, Kang and Ji, Shuiwang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12241--12252},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yuan21c/yuan21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/yuan21c.html},\n abstract = \t {We consider the problem of explaining the predictions of graph neural networks (GNNs), which otherwise are considered as black boxes. Existing methods invariably focus on explaining the importance of graph nodes or edges but ignore the substructures of graphs, which are more intuitive and human-intelligible. In this work, we propose a novel method, known as SubgraphX, to explain GNNs by identifying important subgraphs. Given a trained GNN model and an input graph, our SubgraphX explains its predictions by efficiently exploring different subgraphs with Monte Carlo tree search. To make the tree search more effective, we propose to use Shapley values as a measure of subgraph importance, which can also capture the interactions among different subgraphs. To expedite computations, we propose efficient approximation schemes to compute Shapley values for graph data. Our work represents the first attempt to explain GNNs via identifying subgraphs explicitly and directly. 
Experimental results show that our SubgraphX achieves significantly improved explanations, while keeping computations at a reasonable level.}\n}", "pdf": "http://proceedings.mlr.press/v139/yuan21c/yuan21c.pdf", "supp": "", "pdf_size": 1281070, "gs_citation": 523, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11591948097171126043&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science & Engineering, Texas A&M University, TX, USA; Department of Computer Science & Engineering, Texas A&M University, TX, USA; Department of Electronic Engineering and Information Science, University of Science and Technology of China, Hefei, China; West China Biomedical Big Data Center, West China Hospital, Chengdu, China; Department of Computer Science & Engineering, Texas A&M University, TX, USA", "aff_domain": "tamu.edu; ; ; ;tamu.edu", "email": "tamu.edu; ; ; ;tamu.edu", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/yuan21c.html", "aff_unique_index": "0;0;1;2;0", "aff_unique_norm": "Texas A&M University;University of Science and Technology of China;West China Hospital", "aff_unique_dep": "Department of Computer Science & Engineering;Department of Electronic Engineering and Information Science;West China Biomedical Big Data Center", "aff_unique_url": "https://www.tamu.edu;http://www.ustc.edu.cn;", "aff_unique_abbr": "TAMU;USTC;", "aff_campus_unique_index": "0;0;1;2;0", "aff_campus_unique": "TX;Hefei;Chengdu", "aff_country_unique_index": "0;0;1;1;0", "aff_country_unique": "United States;China" }, { "title": "On Learnability via Gradient Method for Two-Layer ReLU Neural Networks in Teacher-Student Setting", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9199", "id": "9199", "proceeding": "http://proceedings.mlr.press/v139/akiyama21a.html", "slides": "", "author_site": "Shunta Akiyama, Taiji Suzuki", "author": "Shunta Akiyama; Taiji Suzuki", "abstract": "Deep learning empirically achieves high performance in many applications, but its training dynamics has not been fully understood theoretically. In this paper, we explore theoretical analysis on training two-layer ReLU neural networks in a teacher-student regression model, in which a student network learns an unknown teacher network through its outputs. We show that with a specific regularization and sufficient over-parameterization, the student network can identify the parameters of the teacher network with high probability via gradient descent with a norm dependent stepsize even though the objective function is highly non-convex. The key theoretical tool is the measure representation of the neural networks and a novel application of a dual certificate argument for sparse estimation on a measure space. 
We analyze the global minima and global convergence property in the measure space.", "bibtex": "@InProceedings{pmlr-v139-akiyama21a,\n title = \t {On Learnability via Gradient Method for Two-Layer ReLU Neural Networks in Teacher-Student Setting},\n author = {Akiyama, Shunta and Suzuki, Taiji},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {152--162},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/akiyama21a/akiyama21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/akiyama21a.html},\n abstract = \t {Deep learning empirically achieves high performance in many applications, but its training dynamics has not been fully understood theoretically. In this paper, we explore theoretical analysis on training two-layer ReLU neural networks in a teacher-student regression model, in which a student network learns an unknown teacher network through its outputs. We show that with a specific regularization and sufficient over-parameterization, the student network can identify the parameters of the teacher network with high probability via gradient descent with a norm dependent stepsize even though the objective function is highly non-convex. The key theoretical tool is the measure representation of the neural networks and a novel application of a dual certificate argument for sparse estimation on a measure space. We analyze the global minima and global convergence property in the measure space.}\n}", "pdf": "http://proceedings.mlr.press/v139/akiyama21a/akiyama21a.pdf", "supp": "", "pdf_size": 1555871, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8117866795241176612&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Graduate School of Information Science and Technology, The University of Tokyo, Tokyo, Japan+Center for Advanced Intelligence Project, RIKEN, Tokyo, Japan; Graduate School of Information Science and Technology, The University of Tokyo, Tokyo, Japan+Center for Advanced Intelligence Project, RIKEN, Tokyo, Japan", "aff_domain": "mist.i-tokyo.ac.jp;mist.i.u-tokyo.ac.jp", "email": "mist.i-tokyo.ac.jp;mist.i.u-tokyo.ac.jp", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/akiyama21a.html", "aff_unique_index": "0+1;0+1", "aff_unique_norm": "University of Tokyo;RIKEN", "aff_unique_dep": "Graduate School of Information Science and Technology;Center for Advanced Intelligence Project", "aff_unique_url": "https://www.u-tokyo.ac.jp;https://www.riken.jp", "aff_unique_abbr": "UTokyo;RIKEN", "aff_campus_unique_index": "0+0;0+0", "aff_campus_unique": "Tokyo", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "Japan" }, { "title": "On Limited-Memory Subsampling Strategies for Bandits", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9321", "id": "9321", "proceeding": "http://proceedings.mlr.press/v139/baudry21b.html", "slides": "", "author_site": "Dorian Baudry, Yoan Russac, Olivier Capp\u00e9", "author": "Dorian Baudry; Yoan Russac; Olivier Capp\u00e9", "abstract": "There has been a recent surge of interest in non-parametric bandit algorithms based on subsampling. One drawback however of these approaches is the additional complexity required by random subsampling and the storage of the full history of rewards. 
Our first contribution is to show that a simple deterministic subsampling rule, proposed in the recent work of \\citet{baudry2020sub} under the name of \u201clast-block subsampling\u201d, is asymptotically optimal in one-parameter exponential families. In addition, we prove that these guarantees also hold when limiting the algorithm memory to a polylogarithmic function of the time horizon. These findings open up new perspectives, in particular for non-stationary scenarios in which the arm distributions evolve over time. We propose a variant of the algorithm in which only the most recent observations are used for subsampling, achieving optimal regret guarantees under the assumption of a known number of abrupt changes. Extensive numerical simulations highlight the merits of this approach, particularly when the changes are not only affecting the means of the rewards.", "bibtex": "@InProceedings{pmlr-v139-baudry21b,\n title = \t {On Limited-Memory Subsampling Strategies for Bandits},\n author = {Baudry, Dorian and Russac, Yoan and Capp{\\'e}, Olivier},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {727--737},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/baudry21b/baudry21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/baudry21b.html},\n abstract = \t {There has been a recent surge of interest in non-parametric bandit algorithms based on subsampling. One drawback however of these approaches is the additional complexity required by random subsampling and the storage of the full history of rewards. Our first contribution is to show that a simple deterministic subsampling rule, proposed in the recent work of \\citet{baudry2020sub} under the name of \u201clast-block subsampling\u201d, is asymptotically optimal in one-parameter exponential families. In addition, we prove that these guarantees also hold when limiting the algorithm memory to a polylogarithmic function of the time horizon. These findings open up new perspectives, in particular for non-stationary scenarios in which the arm distributions evolve over time. We propose a variant of the algorithm in which only the most recent observations are used for subsampling, achieving optimal regret guarantees under the assumption of a known number of abrupt changes. Extensive numerical simulations highlight the merits of this approach, particularly when the changes are not only affecting the means of the rewards.}\n}", "pdf": "http://proceedings.mlr.press/v139/baudry21b/baudry21b.pdf", "supp": "", "pdf_size": 1203448, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4227884458802378115&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Univ. 
Lille, CNRS, Inria, Centrale Lille, UMR 9198-CRIStAL, F-59000 Lille, France+DI ENS, CNRS, Inria, ENS, Universit\u00e9 PSL, Paris, France; DI ENS, CNRS, Inria, ENS, Universit\u00e9 PSL, Paris, France; DI ENS, CNRS, Inria, ENS, Universit\u00e9 PSL, Paris, France", "aff_domain": "inria.fr;ens.fr; ", "email": "inria.fr;ens.fr; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/baudry21b.html", "aff_unique_index": "0+1;1;1", "aff_unique_norm": "University of Lille;\u00c9cole Normale Sup\u00e9rieure", "aff_unique_dep": "UMR 9198-CRIStAL;", "aff_unique_url": "https://www.univ-lille.fr;https://www.ens.fr", "aff_unique_abbr": "Univ. Lille;ENS", "aff_campus_unique_index": "0+1;1;1", "aff_campus_unique": "Lille;Paris", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "France" }, { "title": "On Linear Identifiability of Learned Representations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10089", "id": "10089", "proceeding": "http://proceedings.mlr.press/v139/roeder21a.html", "slides": "", "author_site": "Geoffrey Roeder, Luke Metz, Durk Kingma", "author": "Geoffrey Roeder; Luke Metz; Durk Kingma", "abstract": "Identifiability is a desirable property of a statistical model: it implies that the true model parameters may be estimated to any desired precision, given sufficient computational resources and data. We study identifiability in the context of representation learning: discovering nonlinear data representations that are optimal with respect to some downstream task. When parameterized as deep neural networks, such representation functions lack identifiability in parameter space, because they are over-parameterized by design. In this paper, building on recent advances in nonlinear Independent Components Analysis, we aim to rehabilitate identifiability by showing that a large family of discriminative models are in fact identifiable in function space, up to a linear indeterminacy. Many models for representation learning in a wide variety of domains have been identifiable in this sense, including text, images and audio, state-of-the-art at time of publication. We derive sufficient conditions for linear identifiability and provide empirical support for the result on both simulated and real-world data.", "bibtex": "@InProceedings{pmlr-v139-roeder21a,\n title = \t {On Linear Identifiability of Learned Representations},\n author = {Roeder, Geoffrey and Metz, Luke and Kingma, Durk},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9030--9039},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/roeder21a/roeder21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/roeder21a.html},\n abstract = \t {Identifiability is a desirable property of a statistical model: it implies that the true model parameters may be estimated to any desired precision, given sufficient computational resources and data. We study identifiability in the context of representation learning: discovering nonlinear data representations that are optimal with respect to some downstream task. When parameterized as deep neural networks, such representation functions lack identifiability in parameter space, because they are over-parameterized by design. 
In this paper, building on recent advances in nonlinear Independent Components Analysis, we aim to rehabilitate identifiability by showing that a large family of discriminative models are in fact identifiable in function space, up to a linear indeterminacy. Many models for representation learning in a wide variety of domains have been identifiable in this sense, including text, images and audio, state-of-the-art at time of publication. We derive sufficient conditions for linear identifiability and provide empirical support for the result on both simulated and real-world data.}\n}", "pdf": "http://proceedings.mlr.press/v139/roeder21a/roeder21a.pdf", "supp": "", "pdf_size": 993131, "gs_citation": 103, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15211610123124896160&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Princeton University; Google Brain; Google Brain", "aff_domain": "princeton.edu; ;google.com", "email": "princeton.edu; ;google.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/roeder21a.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "Princeton University;Google", "aff_unique_dep": ";Google Brain", "aff_unique_url": "https://www.princeton.edu;https://brain.google.com", "aff_unique_abbr": "Princeton;Google Brain", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Mountain View", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "On Lower Bounds for Standard and Robust Gaussian Process Bandit Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10639", "id": "10639", "proceeding": "http://proceedings.mlr.press/v139/cai21f.html", "slides": "", "author_site": "Xu Cai, Jonathan Scarlett", "author": "Xu Cai; Jonathan Scarlett", "abstract": "In this paper, we consider algorithm independent lower bounds for the problem of black-box optimization of functions having a bounded norm in some Reproducing Kernel Hilbert Space (RKHS), which can be viewed as a non-Bayesian Gaussian process bandit problem. In the standard noisy setting, we provide a novel proof technique for deriving lower bounds on the regret, with benefits including simplicity, versatility, and an improved dependence on the error probability. In a robust setting in which the final point is perturbed by an adversary, we strengthen an existing lower bound that only holds for target success probabilities very close to one, by allowing for arbitrary target success probabilities in (0, 1). 
Furthermore, in a distinct robust setting in which every sampled point may be perturbed by a constrained adversary, we provide a novel lower bound for deterministic strategies, demonstrating an inevitable joint dependence of the cumulative regret on the corruption level and the time horizon, in contrast with existing lower bounds that only characterize the individual dependencies.", "bibtex": "@InProceedings{pmlr-v139-cai21f,\n title = \t {On Lower Bounds for Standard and Robust Gaussian Process Bandit Optimization},\n author = {Cai, Xu and Scarlett, Jonathan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1216--1226},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cai21f/cai21f.pdf},\n url = \t {https://proceedings.mlr.press/v139/cai21f.html},\n abstract = \t {In this paper, we consider algorithm independent lower bounds for the problem of black-box optimization of functions having a bounded norm in some Reproducing Kernel Hilbert Space (RKHS), which can be viewed as a non-Bayesian Gaussian process bandit problem. In the standard noisy setting, we provide a novel proof technique for deriving lower bounds on the regret, with benefits including simplicity, versatility, and an improved dependence on the error probability. In a robust setting in which the final point is perturbed by an adversary, we strengthen an existing lower bound that only holds for target success probabilities very close to one, by allowing for arbitrary target success probabilities in (0, 1). Furthermore, in a distinct robust setting in which every sampled point may be perturbed by a constrained adversary, we provide a novel lower bound for deterministic strategies, demonstrating an inevitable joint dependence of the cumulative regret on the corruption level and the time horizon, in contrast with existing lower bounds that only characterize the individual dependencies.}\n}", "pdf": "http://proceedings.mlr.press/v139/cai21f/cai21f.pdf", "supp": "", "pdf_size": 752011, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2518790427235586429&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science, National University of Singapore + Department of Mathematics & Institute of Data Science, National University of Singapore; Department of Computer Science, National University of Singapore + Department of Mathematics & Institute of Data Science, National University of Singapore", "aff_domain": "u.nus.edu;comp.nus.edu.sg", "email": "u.nus.edu;comp.nus.edu.sg", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/cai21f.html", "aff_unique_index": "0+0;0+0", "aff_unique_norm": "National University of Singapore", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.nus.edu.sg", "aff_unique_abbr": "NUS", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "Singapore" }, { "title": "On Monotonic Linear Interpolation of Neural Network Parameters", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10429", "id": "10429", "proceeding": "http://proceedings.mlr.press/v139/lucas21a.html", "slides": "", "author_site": "James Lucas, Juhan Bae, Michael Zhang, Stanislav 
Fort, Richard Zemel, Roger Grosse", "author": "James R Lucas; Juhan Bae; Michael R Zhang; Stanislav Fort; Richard Zemel; Roger B Grosse", "abstract": "Linear interpolation between initial neural network parameters and converged parameters after training with stochastic gradient descent (SGD) typically leads to a monotonic decrease in the training objective. This Monotonic Linear Interpolation (MLI) property, first observed by Goodfellow et al. 2014, persists in spite of the non-convex objectives and highly non-linear training dynamics of neural networks. Extending this work, we evaluate several hypotheses for this property that, to our knowledge, have not yet been explored. Using tools from differential geometry, we draw connections between the interpolated paths in function space and the monotonicity of the network \u2014 providing sufficient conditions for the MLI property under mean squared error. While the MLI property holds under various settings (e.g., network architectures and learning problems), we show in practice that networks violating the MLI property can be produced systematically, by encouraging the weights to move far from initialization. The MLI property raises important questions about the loss landscape geometry of neural networks and highlights the need to further study their global properties.", "bibtex": "@InProceedings{pmlr-v139-lucas21a,\n title = \t {On Monotonic Linear Interpolation of Neural Network Parameters},\n author = {Lucas, James R and Bae, Juhan and Zhang, Michael R and Fort, Stanislav and Zemel, Richard and Grosse, Roger B},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7168--7179},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lucas21a/lucas21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lucas21a.html},\n abstract = \t {Linear interpolation between initial neural network parameters and converged parameters after training with stochastic gradient descent (SGD) typically leads to a monotonic decrease in the training objective. This Monotonic Linear Interpolation (MLI) property, first observed by Goodfellow et al. 2014, persists in spite of the non-convex objectives and highly non-linear training dynamics of neural networks. Extending this work, we evaluate several hypotheses for this property that, to our knowledge, have not yet been explored. Using tools from differential geometry, we draw connections between the interpolated paths in function space and the monotonicity of the network \u2014 providing sufficient conditions for the MLI property under mean squared error. While the MLI property holds under various settings (e.g., network architectures and learning problems), we show in practice that networks violating the MLI property can be produced systematically, by encouraging the weights to move far from initialization. 
The MLI property raises important questions about the loss landscape geometry of neural networks and highlights the need to further study their global properties.}\n}", "pdf": "http://proceedings.mlr.press/v139/lucas21a/lucas21a.pdf", "supp": "", "pdf_size": 3045516, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14532044697035740084&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "University of Toronto+Vector Institute; University of Toronto+Vector Institute; University of Toronto+Vector Institute; Stanford University; University of Toronto+Vector Institute; University of Toronto+Vector Institute", "aff_domain": "cs.toronto.edu; ; ; ; ; ", "email": "cs.toronto.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/lucas21a.html", "aff_unique_index": "0+1;0+1;0+1;2;0+1;0+1", "aff_unique_norm": "University of Toronto;Vector Institute;Stanford University", "aff_unique_dep": ";;", "aff_unique_url": "https://www.utoronto.ca;https://vectorinstitute.ai/;https://www.stanford.edu", "aff_unique_abbr": "U of T;Vector Institute;Stanford", "aff_campus_unique_index": ";;;1;;", "aff_campus_unique": ";Stanford", "aff_country_unique_index": "0+0;0+0;0+0;1;0+0;0+0", "aff_country_unique": "Canada;United States" }, { "title": "On Perceptual Lossy Compression: The Cost of Perceptual Reconstruction and An Optimal Training Framework", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10279", "id": "10279", "proceeding": "http://proceedings.mlr.press/v139/yan21d.html", "slides": "", "author_site": "Zeyu Yan, Fei Wen, rendong Ying, Chao Ma, Peilin Liu", "author": "Zeyu Yan; Fei Wen; Rendong Ying; Chao Ma; Peilin Liu", "abstract": "Lossy compression algorithms are typically designed to achieve the lowest possible distortion at a given bit rate. However, recent studies show that pursuing high perceptual quality would lead to increase of the lowest achievable distortion (e.g., MSE). This paper provides nontrivial results theoretically revealing that, 1) the cost of achieving perfect perception quality is exactly a doubling of the lowest achievable MSE distortion, 2) an optimal encoder for the \u201cclassic\u201d rate-distortion problem is also optimal for the perceptual compression problem, 3) distortion loss is unnecessary for training a perceptual decoder. Further, we propose a novel training framework to achieve the lowest MSE distortion under perfect perception constraint at a given bit rate. This framework uses a GAN with discriminator conditioned on an MSE-optimized encoder, which is superior over the traditional framework using distortion plus adversarial loss. 
Experiments are provided to verify the theoretical finding and demonstrate the superiority of the proposed training framework.", "bibtex": "@InProceedings{pmlr-v139-yan21d,\n title = \t {On Perceptual Lossy Compression: The Cost of Perceptual Reconstruction and An Optimal Training Framework},\n author = {Yan, Zeyu and Wen, Fei and Ying, Rendong and Ma, Chao and Liu, Peilin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11682--11692},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yan21d/yan21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/yan21d.html},\n abstract = \t {Lossy compression algorithms are typically designed to achieve the lowest possible distortion at a given bit rate. However, recent studies show that pursuing high perceptual quality would lead to increase of the lowest achievable distortion (e.g., MSE). This paper provides nontrivial results theoretically revealing that, 1) the cost of achieving perfect perception quality is exactly a doubling of the lowest achievable MSE distortion, 2) an optimal encoder for the \u201cclassic\u201d rate-distortion problem is also optimal for the perceptual compression problem, 3) distortion loss is unnecessary for training a perceptual decoder. Further, we propose a novel training framework to achieve the lowest MSE distortion under perfect perception constraint at a given bit rate. This framework uses a GAN with discriminator conditioned on an MSE-optimized encoder, which is superior over the traditional framework using distortion plus adversarial loss. 
Experiments are provided to verify the theoretical finding and demonstrate the superiority of the proposed training framework.}\n}", "pdf": "http://proceedings.mlr.press/v139/yan21d/yan21d.pdf", "supp": "", "pdf_size": 2213244, "gs_citation": 41, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3982169689811841911&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "School of Electronic Information and Electrical Engnieering, Shanghai Jiao Tong University, Shanghai, China; School of Electronic Information and Electrical Engnieering, Shanghai Jiao Tong University, Shanghai, China; School of Electronic Information and Electrical Engnieering, Shanghai Jiao Tong University, Shanghai, China; School of Electronic Information and Electrical Engnieering, Shanghai Jiao Tong University, Shanghai, China; School of Electronic Information and Electrical Engnieering, Shanghai Jiao Tong University, Shanghai, China", "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/yan21d.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Shanghai Jiao Tong University", "aff_unique_dep": "School of Electronic Information and Electrical Engineering", "aff_unique_url": "https://www.sjtu.edu.cn", "aff_unique_abbr": "SJTU", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Shanghai", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "China" }, { "title": "On Proximal Policy Optimization\u2019s Heavy-tailed Gradients", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9149", "id": "9149", "proceeding": "http://proceedings.mlr.press/v139/garg21b.html", "slides": "", "author_site": "Saurabh Garg, Joshua Zhanson, Emilio Parisotto, Adarsh Prasad, Zico Kolter, Zachary Lipton, Sivaraman Balakrishnan, Ruslan Salakhutdinov, Pradeep Ravikumar", "author": "Saurabh Garg; Joshua Zhanson; Emilio Parisotto; Adarsh Prasad; Zico Kolter; Zachary Lipton; Sivaraman Balakrishnan; Ruslan Salakhutdinov; Pradeep Ravikumar", "abstract": "Modern policy gradient algorithms such as Proximal Policy Optimization (PPO) rely on an arsenal of heuristics, including loss clipping and gradient clipping, to ensure successful learning. These heuristics are reminiscent of techniques from robust statistics, commonly used for estimation in outlier-rich (\"heavy-tailed\") regimes. In this paper, we present a detailed empirical study to characterize the heavy-tailed nature of the gradients of the PPO surrogate reward function. We demonstrate that the gradients, especially for the actor network, exhibit pronounced heavy-tailedness and that it increases as the agent\u2019s policy diverges from the behavioral policy (i.e., as the agent goes further off policy). Further examination implicates the likelihood ratios and advantages in the surrogate reward as the main sources of the observed heavy-tailedness. We then highlight issues arising due to the heavy-tailed nature of the gradients. In this light, we study the effects of the standard PPO clipping heuristics, demonstrating that these tricks primarily serve to offset heavy-tailedness in gradients. Thus motivated, we propose incorporating GMOM, a high-dimensional robust estimator, into PPO as a substitute for three clipping tricks. 
Despite requiring less hyperparameter tuning, our method matches the performance of PPO (with all heuristics enabled) on a battery of MuJoCo continuous control tasks.", "bibtex": "@InProceedings{pmlr-v139-garg21b,\n title = \t {On Proximal Policy Optimization\u2019s Heavy-tailed Gradients},\n author = {Garg, Saurabh and Zhanson, Joshua and Parisotto, Emilio and Prasad, Adarsh and Kolter, Zico and Lipton, Zachary and Balakrishnan, Sivaraman and Salakhutdinov, Ruslan and Ravikumar, Pradeep},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3610--3619},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/garg21b/garg21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/garg21b.html},\n abstract = \t {Modern policy gradient algorithms such as Proximal Policy Optimization (PPO) rely on an arsenal of heuristics, including loss clipping and gradient clipping, to ensure successful learning. These heuristics are reminiscent of techniques from robust statistics, commonly used for estimation in outlier-rich (\"heavy-tailed\") regimes. In this paper, we present a detailed empirical study to characterize the heavy-tailed nature of the gradients of the PPO surrogate reward function. We demonstrate that the gradients, especially for the actor network, exhibit pronounced heavy-tailedness and that it increases as the agent\u2019s policy diverges from the behavioral policy (i.e., as the agent goes further off policy). Further examination implicates the likelihood ratios and advantages in the surrogate reward as the main sources of the observed heavy-tailedness. We then highlight issues arising due to the heavy-tailed nature of the gradients. In this light, we study the effects of the standard PPO clipping heuristics, demonstrating that these tricks primarily serve to offset heavy-tailedness in gradients. Thus motivated, we propose incorporating GMOM, a high-dimensional robust estimator, into PPO as a substitute for three clipping tricks. 
Despite requiring less hyperparameter tuning, our method matches the performance of PPO (with all heuristics enabled) on a battery of MuJoCo continuous control tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/garg21b/garg21b.pdf", "supp": "", "pdf_size": 622332, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13161212190574870495&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Machine Learning Department, Carnegie Mellon University; Computer Science Department, Carnegie Mellon University; Machine Learning Department, Carnegie Mellon University; Machine Learning Department, Carnegie Mellon University; Computer Science Department, Carnegie Mellon University; Machine Learning Department, Carnegie Mellon University; Department of Statistics and Data Science, Carnegie Mellon University; Machine Learning Department, Carnegie Mellon University; Machine Learning Department, Carnegie Mellon University", "aff_domain": "andrew.cmu.edu; ; ; ; ; ; ; ; ", "email": "andrew.cmu.edu; ; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 9, "oa": "https://proceedings.mlr.press/v139/garg21b.html", "aff_unique_index": "0;0;0;0;0;0;0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "Machine Learning Department", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "On Recovering from Modeling Errors Using Testing Bayesian Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9395", "id": "9395", "proceeding": "http://proceedings.mlr.press/v139/huang21a.html", "slides": "", "author_site": "Haiying Huang, Adnan Darwiche", "author": "Haiying Huang; Adnan Darwiche", "abstract": "We consider the problem of supervised learning with Bayesian Networks when the used dependency structure is incomplete due to missing edges or missing variable states. These modeling errors induce independence constraints on the learned model that may not hold in the true, data-generating distribution. We provide a unified treatment of these modeling errors as instances of state-space abstractions. We then identify a class of Bayesian Networks and queries which allow one to fully recover from such modeling errors if one can choose Conditional Probability Tables (CPTs) dynamically based on evidence. We show theoretically that the recently proposed Testing Bayesian Networks (TBNs), which can be trained by compiling them into Testing Arithmetic Circuits (TACs), provide a promising construct for emulating this CPT selection mechanism. 
Finally, we present empirical results that illustrate the promise of TBNs as a tool for recovering from certain modeling errors in the context of supervised learning.", "bibtex": "@InProceedings{pmlr-v139-huang21a,\n title = \t {On Recovering from Modeling Errors Using Testing Bayesian Networks},\n author = {Huang, Haiying and Darwiche, Adnan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4402--4411},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/huang21a/huang21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/huang21a.html},\n abstract = \t {We consider the problem of supervised learning with Bayesian Networks when the used dependency structure is incomplete due to missing edges or missing variable states. These modeling errors induce independence constraints on the learned model that may not hold in the true, data-generating distribution. We provide a unified treatment of these modeling errors as instances of state-space abstractions. We then identify a class of Bayesian Networks and queries which allow one to fully recover from such modeling errors if one can choose Conditional Probability Tables (CPTs) dynamically based on evidence. We show theoretically that the recently proposed Testing Bayesian Networks (TBNs), which can be trained by compiling them into Testing Arithmetic Circuits (TACs), provide a promising construct for emulating this CPT selection mechanism. Finally, we present empirical results that illustrate the promise of TBNs as a tool for recovering from certain modeling errors in the context of supervised learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/huang21a/huang21a.pdf", "supp": "", "pdf_size": 603292, "gs_citation": 2, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5540972103838189028&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Computer Science Department, University of California, Los Angeles, USA; Computer Science Department, University of California, Los Angeles, USA", "aff_domain": "ucla.edu; ", "email": "ucla.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/huang21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Los Angeles", "aff_unique_dep": "Computer Science Department", "aff_unique_url": "https://www.ucla.edu", "aff_unique_abbr": "UCLA", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Los Angeles", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "On Reinforcement Learning with Adversarial Corruption and Its Application to Block MDP", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8617", "id": "8617", "proceeding": "http://proceedings.mlr.press/v139/wu21g.html", "slides": "/media/icml-2021/Slides/8617.pdf", "author_site": "Tianhao Wu, Yunchang Yang, Simon Du, Liwei Wang", "author": "Tianhao Wu; Yunchang Yang; Simon Du; Liwei Wang", "abstract": "We study reinforcement learning (RL) in episodic tabular MDPs with adversarial corruptions, where some episodes can be adversarially corrupted. 
When the total number of corrupted episodes is known, we propose an algorithm, Corruption Robust Monotonic Value Propagation (\\textsf{CR-MVP}), which achieves a regret bound of $\\tilde{O}\\left(\\left(\\sqrt{SAK}+S^2A+CSA\\right)\\mathrm{polylog}(H)\\right)$, where $S$ is the number of states, $A$ is the number of actions, $H$ is the planning horizon, $K$ is the number of episodes, and $C$ is the corruption level. We also provide a corresponding lower bound, which indicates that our upper bound is tight. Finally, as an application, we study RL with rich observations in the block MDP model. We provide the first algorithm that achieves a $\\sqrt{K}$-type regret in this setting and is computationally efficient.", "bibtex": "@InProceedings{pmlr-v139-wu21g,\n title = \t {On Reinforcement Learning with Adversarial Corruption and Its Application to Block MDP},\n author = {Wu, Tianhao and Yang, Yunchang and Du, Simon and Wang, Liwei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11296--11306},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wu21g/wu21g.pdf},\n url = \t {https://proceedings.mlr.press/v139/wu21g.html},\n abstract = \t {We study reinforcement learning (RL) in episodic tabular MDPs with adversarial corruptions, where some episodes can be adversarially corrupted. When the total number of corrupted episodes is known, we propose an algorithm, Corruption Robust Monotonic Value Propagation (\\textsf{CR-MVP}), which achieves a regret bound of $\\tilde{O}\\left(\\left(\\sqrt{SAK}+S^2A+CSA\\right)\\mathrm{polylog}(H)\\right)$, where $S$ is the number of states, $A$ is the number of actions, $H$ is the planning horizon, $K$ is the number of episodes, and $C$ is the corruption level. We also provide a corresponding lower bound, which indicates that our upper bound is tight. Finally, as an application, we study RL with rich observations in the block MDP model. 
We provide the first algorithm that achieves a $\\sqrt{K}$-type regret in this setting and is computationally efficient.}\n}", "pdf": "http://proceedings.mlr.press/v139/wu21g/wu21g.pdf", "supp": "", "pdf_size": 377737, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10845339340287043765&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Peking University + Pazhou Lab, Guangzhou, 510330, China; Peking University; University of Washington; Center for Data Science, Peking University + Key Laboratory of Machine Perception, MOE, School of EECS, Peking University", "aff_domain": "cis.pku.edu.cn; ; ;cis.pku.edu.cn", "email": "cis.pku.edu.cn; ; ;cis.pku.edu.cn", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/wu21g.html", "aff_unique_index": "0+1;0;2;0+0", "aff_unique_norm": "Peking University;Pazhou Lab;University of Washington", "aff_unique_dep": ";;", "aff_unique_url": "http://www.pku.edu.cn;;https://www.washington.edu", "aff_unique_abbr": "Peking U;;UW", "aff_campus_unique_index": "1;2", "aff_campus_unique": ";Guangzhou;Beijing", "aff_country_unique_index": "0+0;0;1;0+0", "aff_country_unique": "China;United States" }, { "title": "On Reward-Free RL with Kernel and Neural Function Approximations: Single-Agent MDP and Markov Game", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8781", "id": "8781", "proceeding": "http://proceedings.mlr.press/v139/qiu21d.html", "slides": "", "author_site": "Shuang Qiu, Jieping Ye, Zhaoran Wang, Zhuoran Yang", "author": "Shuang Qiu; Jieping Ye; Zhaoran Wang; Zhuoran Yang", "abstract": "To achieve sample efficiency in reinforcement learning (RL), it necessitates to efficiently explore the underlying environment. Under the offline setting, addressing the exploration challenge lies in collecting an offline dataset with sufficient coverage. Motivated by such a challenge, we study the reward-free RL problem, where an agent aims to thoroughly explore the environment without any pre-specified reward function. Then, given any extrinsic reward, the agent computes the optimal policy via offline RL with data collected in the exploration stage. Moreover, we tackle this problem under the context of function approximation, leveraging powerful function approximators. Specifically, we propose to explore via an optimistic variant of the value-iteration algorithm incorporating kernel and neural function approximations, where we adopt the associated exploration bonus as the exploration reward. Moreover, we design exploration and planning algorithms for both single-agent MDPs and zero-sum Markov games and prove that our methods can achieve $\\widetilde{\\mathcal{O}}(1 /\\varepsilon^2)$ sample complexity for generating a $\\varepsilon$-suboptimal policy or $\\varepsilon$-approximate Nash equilibrium when given an arbitrary extrinsic reward. 
To the best of our knowledge, we establish the first provably efficient reward-free RL algorithm with kernel and neural function approximators.", "bibtex": "@InProceedings{pmlr-v139-qiu21d,\n title = \t {On Reward-Free RL with Kernel and Neural Function Approximations: Single-Agent MDP and Markov Game},\n author = {Qiu, Shuang and Ye, Jieping and Wang, Zhaoran and Yang, Zhuoran},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8737--8747},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/qiu21d/qiu21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/qiu21d.html},\n abstract = \t {To achieve sample efficiency in reinforcement learning (RL), it necessitates to efficiently explore the underlying environment. Under the offline setting, addressing the exploration challenge lies in collecting an offline dataset with sufficient coverage. Motivated by such a challenge, we study the reward-free RL problem, where an agent aims to thoroughly explore the environment without any pre-specified reward function. Then, given any extrinsic reward, the agent computes the optimal policy via offline RL with data collected in the exploration stage. Moreover, we tackle this problem under the context of function approximation, leveraging powerful function approximators. Specifically, we propose to explore via an optimistic variant of the value-iteration algorithm incorporating kernel and neural function approximations, where we adopt the associated exploration bonus as the exploration reward. Moreover, we design exploration and planning algorithms for both single-agent MDPs and zero-sum Markov games and prove that our methods can achieve $\\widetilde{\\mathcal{O}}(1 /\\varepsilon^2)$ sample complexity for generating a $\\varepsilon$-suboptimal policy or $\\varepsilon$-approximate Nash equilibrium when given an arbitrary extrinsic reward. 
To the best of our knowledge, we establish the first provably efficient reward-free RL algorithm with kernel and neural function approximators.}\n}", "pdf": "http://proceedings.mlr.press/v139/qiu21d/qiu21d.pdf", "supp": "", "pdf_size": 369306, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16176632215725006143&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "University of Michigan; University of Michigan; Northwestern University; Princeton University", "aff_domain": "umich.edu;umich.edu;gmail.com;princeton.edu", "email": "umich.edu;umich.edu;gmail.com;princeton.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/qiu21d.html", "aff_unique_index": "0;0;1;2", "aff_unique_norm": "University of Michigan;Northwestern University;Princeton University", "aff_unique_dep": ";;", "aff_unique_url": "https://www.umich.edu;https://www.northwestern.edu;https://www.princeton.edu", "aff_unique_abbr": "UM;NU;Princeton", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "On Robust Mean Estimation under Coordinate-level Corruption", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10339", "id": "10339", "proceeding": "http://proceedings.mlr.press/v139/liu21r.html", "slides": "", "author_site": "Zifan Liu, Jong Ho Park, Theo Rekatsinas, Christos Tzamos", "author": "Zifan Liu; Jong Ho Park; Theodoros Rekatsinas; Christos Tzamos", "abstract": "We study the problem of robust mean estimation and introduce a novel Hamming distance-based measure of distribution shift for coordinate-level corruptions. We show that this measure yields adversary models that capture more realistic corruptions than those used in prior works, and present an information-theoretic analysis of robust mean estimation in these settings. We show that for structured distributions, methods that leverage the structure yield information theoretically more accurate mean estimation. We also focus on practical algorithms for robust mean estimation and study when data cleaning-inspired approaches that first fix corruptions in the input data and then perform robust mean estimation can match the information theoretic bounds of our analysis. We finally demonstrate experimentally that this two-step approach outperforms structure-agnostic robust estimation and provides accurate mean estimation even for high-magnitude corruption.", "bibtex": "@InProceedings{pmlr-v139-liu21r,\n title = \t {On Robust Mean Estimation under Coordinate-level Corruption},\n author = {Liu, Zifan and Park, Jong Ho and Rekatsinas, Theodoros and Tzamos, Christos},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6914--6924},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21r/liu21r.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21r.html},\n abstract = \t {We study the problem of robust mean estimation and introduce a novel Hamming distance-based measure of distribution shift for coordinate-level corruptions. 
We show that this measure yields adversary models that capture more realistic corruptions than those used in prior works, and present an information-theoretic analysis of robust mean estimation in these settings. We show that for structured distributions, methods that leverage the structure yield information theoretically more accurate mean estimation. We also focus on practical algorithms for robust mean estimation and study when data cleaning-inspired approaches that first fix corruptions in the input data and then perform robust mean estimation can match the information theoretic bounds of our analysis. We finally demonstrate experimentally that this two-step approach outperforms structure-agnostic robust estimation and provides accurate mean estimation even for high-magnitude corruption.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21r/liu21r.pdf", "supp": "", "pdf_size": 2419961, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6395841663870350883&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, University of Wisconsin-Madison, Madison, USA; Department of Computer Science, University of Wisconsin-Madison, Madison, USA; Department of Computer Science, University of Wisconsin-Madison, Madison, USA; Department of Computer Science, University of Wisconsin-Madison, Madison, USA", "aff_domain": "wisc.edu;wisc.edu; ; ", "email": "wisc.edu;wisc.edu; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/liu21r.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Wisconsin-Madison", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.wisc.edu", "aff_unique_abbr": "UW-Madison", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Madison", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "On Signal-to-Noise Ratio Issues in Variational Inference for Deep Gaussian Processes", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10363", "id": "10363", "proceeding": "http://proceedings.mlr.press/v139/rudner21a.html", "slides": "/media/icml-2021/Slides/10363.pdf", "author_site": "Tim G. J. Rudner, Oscar Key, Yarin Gal, Tom Rainforth", "author": "Tim G. J. Rudner; Oscar Key; Yarin Gal; Tom Rainforth", "abstract": "We show that the gradient estimates used in training Deep Gaussian Processes (DGPs) with importance-weighted variational inference are susceptible to signal-to-noise ratio (SNR) issues. Specifically, we show both theoretically and via an extensive empirical evaluation that the SNR of the gradient estimates for the latent variable\u2019s variational parameters decreases as the number of importance samples increases. As a result, these gradient estimates degrade to pure noise if the number of importance samples is too large. To address this pathology, we show how doubly-reparameterized gradient estimators, originally proposed for training variational autoencoders, can be adapted to the DGP setting and that the resultant estimators completely remedy the SNR issue, thereby providing more reliable training. Finally, we demonstrate that our fix can lead to consistent improvements in the predictive performance of DGP models.", "bibtex": "@InProceedings{pmlr-v139-rudner21a,\n title = \t {On Signal-to-Noise Ratio Issues in Variational Inference for Deep Gaussian Processes},\n author = {Rudner, Tim G. J. 
and Key, Oscar and Gal, Yarin and Rainforth, Tom},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9148--9156},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/rudner21a/rudner21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/rudner21a.html},\n abstract = \t {We show that the gradient estimates used in training Deep Gaussian Processes (DGPs) with importance-weighted variational inference are susceptible to signal-to-noise ratio (SNR) issues. Specifically, we show both theoretically and via an extensive empirical evaluation that the SNR of the gradient estimates for the latent variable\u2019s variational parameters decreases as the number of importance samples increases. As a result, these gradient estimates degrade to pure noise if the number of importance samples is too large. To address this pathology, we show how doubly-reparameterized gradient estimators, originally proposed for training variational autoencoders, can be adapted to the DGP setting and that the resultant estimators completely remedy the SNR issue, thereby providing more reliable training. Finally, we demonstrate that our fix can lead to consistent improvements in the predictive performance of DGP models.}\n}", "pdf": "http://proceedings.mlr.press/v139/rudner21a/rudner21a.pdf", "supp": "", "pdf_size": 563806, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16244183498083641614&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science, University of Oxford; Computer Science Department, University College London + Department of Computer Science, University of Oxford; Department of Computer Science, University of Oxford; Department of Computer Science, University of Oxford", "aff_domain": "cs.ox.ac.uk; ;cs.ox.ac.uk;cs.ox.ac.uk", "email": "cs.ox.ac.uk; ;cs.ox.ac.uk;cs.ox.ac.uk", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/rudner21a.html", "aff_unique_index": "0;1+0;0;0", "aff_unique_norm": "University of Oxford;University College London", "aff_unique_dep": "Department of Computer Science;Computer Science Department", "aff_unique_url": "https://www.ox.ac.uk;https://www.ucl.ac.uk", "aff_unique_abbr": "Oxford;UCL", "aff_campus_unique_index": "0;1+0;0;0", "aff_campus_unique": "Oxford;London", "aff_country_unique_index": "0;0+0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "On Variational Inference in Biclustering Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10077", "id": "10077", "proceeding": "http://proceedings.mlr.press/v139/fang21b.html", "slides": "", "author_site": "Guanhua Fang, Ping Li", "author": "Guanhua Fang; Ping Li", "abstract": "Biclustering structures exist ubiquitously in data matrices and the biclustering problem was first formalized by John Hartigan (1972) to cluster rows and columns simultaneously. In this paper, we develop a theory for the estimation of general biclustering models, where the data is assumed to follow certain statistical distribution with underlying biclustering structure. 
Due to the existence of latent variables, directly computing the maximal likelihood estimator is prohibitively difficult in practice and we instead consider the variational inference (VI) approach to solve the parameter estimation problem. Although variational inference method generally has good empirical performance, there are very few theoretical results around VI. In this paper, we obtain the precise estimation bound of variational estimator and show that it matches the minimax rate in terms of estimation error under mild assumptions in biclustering setting. Furthermore, we study the convergence property of the coordinate ascent variational inference algorithm, where both local and global convergence results have been provided. Numerical results validate our new theories.", "bibtex": "@InProceedings{pmlr-v139-fang21b,\n title = \t {On Variational Inference in Biclustering Models},\n author = {Fang, Guanhua and Li, Ping},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3111--3121},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fang21b/fang21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/fang21b.html},\n abstract = \t {Biclustering structures exist ubiquitously in data matrices and the biclustering problem was first formalized by John Hartigan (1972) to cluster rows and columns simultaneously. In this paper, we develop a theory for the estimation of general biclustering models, where the data is assumed to follow certain statistical distribution with underlying biclustering structure. Due to the existence of latent variables, directly computing the maximal likelihood estimator is prohibitively difficult in practice and we instead consider the variational inference (VI) approach to solve the parameter estimation problem. Although variational inference method generally has good empirical performance, there are very few theoretical results around VI. In this paper, we obtain the precise estimation bound of variational estimator and show that it matches the minimax rate in terms of estimation error under mild assumptions in biclustering setting. Furthermore, we study the convergence property of the coordinate ascent variational inference algorithm, where both local and global convergence results have been provided. 
Numerical results validate our new theories.}\n}", "pdf": "http://proceedings.mlr.press/v139/fang21b/fang21b.pdf", "supp": "", "pdf_size": 390723, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3133683780617590914&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "Cognitive Computing Lab, Baidu Research; Cognitive Computing Lab, Baidu Research", "aff_domain": "baidu.com;baidu.com", "email": "baidu.com;baidu.com", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/fang21b.html", "aff_unique_index": "0;0", "aff_unique_norm": "Baidu", "aff_unique_dep": "Cognitive Computing Lab", "aff_unique_url": "https://baidu.com", "aff_unique_abbr": "Baidu", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "China" }, { "title": "On a Combination of Alternating Minimization and Nesterov\u2019s Momentum", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9963", "id": "9963", "proceeding": "http://proceedings.mlr.press/v139/guminov21a.html", "slides": "/media/icml-2021/Slides/9963.pdf", "author_site": "Sergey Guminov, Pavel Dvurechenskii, Nazarii Tupitsa, Alexander Gasnikov", "author": "Sergey Guminov; Pavel Dvurechensky; Nazarii Tupitsa; Alexander Gasnikov", "abstract": "Alternating minimization (AM) procedures are practically efficient in many applications for solving convex and non-convex optimization problems. On the other hand, Nesterov\u2019s accelerated gradient is theoretically optimal first-order method for convex optimization. In this paper we combine AM and Nesterov\u2019s acceleration to propose an accelerated alternating minimization algorithm. We prove $1/k^2$ convergence rate in terms of the objective for convex problems and $1/k$ in terms of the squared gradient norm for non-convex problems, where $k$ is the iteration counter. Our method does not require any knowledge of neither convexity of the problem nor function parameters such as Lipschitz constant of the gradient, i.e. it is adaptive to convexity and smoothness and is uniformly optimal for smooth convex and non-convex problems. Further, we develop its primal-dual modification for strongly convex problems with linear constraints and prove the same $1/k^2$ for the primal objective residual and constraints feasibility.", "bibtex": "@InProceedings{pmlr-v139-guminov21a,\n title = \t {On a Combination of Alternating Minimization and Nesterov\u2019s Momentum},\n author = {Guminov, Sergey and Dvurechensky, Pavel and Tupitsa, Nazarii and Gasnikov, Alexander},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3886--3898},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/guminov21a/guminov21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/guminov21a.html},\n abstract = \t {Alternating minimization (AM) procedures are practically efficient in many applications for solving convex and non-convex optimization problems. On the other hand, Nesterov\u2019s accelerated gradient is theoretically optimal first-order method for convex optimization. In this paper we combine AM and Nesterov\u2019s acceleration to propose an accelerated alternating minimization algorithm. 
We prove $1/k^2$ convergence rate in terms of the objective for convex problems and $1/k$ in terms of the squared gradient norm for non-convex problems, where $k$ is the iteration counter. Our method does not require any knowledge of neither convexity of the problem nor function parameters such as Lipschitz constant of the gradient, i.e. it is adaptive to convexity and smoothness and is uniformly optimal for smooth convex and non-convex problems. Further, we develop its primal-dual modification for strongly convex problems with linear constraints and prove the same $1/k^2$ for the primal objective residual and constraints feasibility.}\n}", "pdf": "http://proceedings.mlr.press/v139/guminov21a/guminov21a.pdf", "supp": "", "pdf_size": 1189358, "gs_citation": 58, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16839307980267270924&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Moscow Institute of Physics and Technology + Institute for Information Transmission Problems RAS + HDI Lab @ National Research University Higher School of Economics; Weierstrass Institute for Applied Analysis and Stochastics + Institute for Information Transmission Problems RAS + HDI Lab @ National Research University Higher School of Economics; Moscow Institute of Physics and Technology + Institute for Information Transmission Problems RAS + HDI Lab @ National Research University Higher School of Economics; Moscow Institute of Physics and Technology + Institute for Information Transmission Problems RAS + HDI Lab @ National Research University Higher School of Economics", "aff_domain": "phystech.edu;wias-berlin.de; ; ", "email": "phystech.edu;wias-berlin.de; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/guminov21a.html", "aff_unique_index": "0+1+2;3+1+2;0+1+2;0+1+2", "aff_unique_norm": "Moscow Institute of Physics and Technology;Institute for Information Transmission Problems;National Research University Higher School of Economics;Weierstrass Institute for Applied Analysis and Stochastics", "aff_unique_dep": ";;HDI Lab;", "aff_unique_url": "https://www.mipt.ru/en;http://www.iitp.ru;https://hse.ru;https://www.wias-berlin.de/", "aff_unique_abbr": "MIPT;IITP RAS;HSE;WIAS", "aff_campus_unique_index": ";;;", "aff_campus_unique": "", "aff_country_unique_index": "0+0+0;1+0+0;0+0+0;0+0+0", "aff_country_unique": "Russian Federation;Germany" }, { "title": "On the Convergence of Hamiltonian Monte Carlo with Stochastic Gradients", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9521", "id": "9521", "proceeding": "http://proceedings.mlr.press/v139/zou21b.html", "slides": "", "author_site": "Difan Zou, Quanquan Gu", "author": "Difan Zou; Quanquan Gu", "abstract": "Hamiltonian Monte Carlo (HMC), built based on the Hamilton\u2019s equation, has been witnessed great success in sampling from high-dimensional posterior distributions. However, it also suffers from computational inefficiency, especially for large training datasets. One common idea to overcome this computational bottleneck is using stochastic gradients, which only queries a mini-batch of training data in each iteration. However, unlike the extensive studies on the convergence analysis of HMC using full gradients, few works focus on establishing the convergence guarantees of stochastic gradient HMC algorithms. 
In this paper, we propose a general framework for proving the convergence rate of HMC with stochastic gradient estimators, for sampling from strongly log-concave and log-smooth target distributions. We show that the convergence to the target distribution in $2$-Wasserstein distance can be guaranteed as long as the stochastic gradient estimator is unbiased and its variance is upper bounded along the algorithm trajectory. We further apply the proposed framework to analyze the convergence rates of HMC with four standard stochastic gradient estimators: mini-batch stochastic gradient (SG), stochastic variance reduced gradient (SVRG), stochastic average gradient (SAGA), and control variate gradient (CVG). Theoretical results explain the inefficiency of mini-batch SG, and suggest that SVRG and SAGA perform better in the tasks with high-precision requirements, while CVG performs better for large dataset. Experiment results verify our theoretical findings.", "bibtex": "@InProceedings{pmlr-v139-zou21b,\n title = \t {On the Convergence of Hamiltonian Monte Carlo with Stochastic Gradients},\n author = {Zou, Difan and Gu, Quanquan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {13012--13022},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zou21b/zou21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/zou21b.html},\n abstract = \t {Hamiltonian Monte Carlo (HMC), built based on the Hamilton\u2019s equation, has been witnessed great success in sampling from high-dimensional posterior distributions. However, it also suffers from computational inefficiency, especially for large training datasets. One common idea to overcome this computational bottleneck is using stochastic gradients, which only queries a mini-batch of training data in each iteration. However, unlike the extensive studies on the convergence analysis of HMC using full gradients, few works focus on establishing the convergence guarantees of stochastic gradient HMC algorithms. In this paper, we propose a general framework for proving the convergence rate of HMC with stochastic gradient estimators, for sampling from strongly log-concave and log-smooth target distributions. We show that the convergence to the target distribution in $2$-Wasserstein distance can be guaranteed as long as the stochastic gradient estimator is unbiased and its variance is upper bounded along the algorithm trajectory. We further apply the proposed framework to analyze the convergence rates of HMC with four standard stochastic gradient estimators: mini-batch stochastic gradient (SG), stochastic variance reduced gradient (SVRG), stochastic average gradient (SAGA), and control variate gradient (CVG). Theoretical results explain the inefficiency of mini-batch SG, and suggest that SVRG and SAGA perform better in the tasks with high-precision requirements, while CVG performs better for large dataset. 
Experiment results verify our theoretical findings.}\n}", "pdf": "http://proceedings.mlr.press/v139/zou21b/zou21b.pdf", "supp": "", "pdf_size": 4163427, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12169805453519262859&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, UCLA; Department of Computer Science, UCLA", "aff_domain": "cs.ucla.edu;cs.ucla.edu", "email": "cs.ucla.edu;cs.ucla.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/zou21b.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Los Angeles", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ucla.edu", "aff_unique_abbr": "UCLA", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Los Angeles", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "On the Explicit Role of Initialization on the Convergence and Implicit Bias of Overparametrized Linear Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10397", "id": "10397", "proceeding": "http://proceedings.mlr.press/v139/min21c.html", "slides": "/media/icml-2021/Slides/10397.pdf", "author_site": "Hancheng Min, Salma Tarmoun, Rene Vidal, Enrique Mallada", "author": "Hancheng Min; Salma Tarmoun; Rene Vidal; Enrique Mallada", "abstract": "Neural networks trained via gradient descent with random initialization and without any regularization enjoy good generalization performance in practice despite being highly overparametrized. A promising direction to explain this phenomenon is to study how initialization and overparametrization affect convergence and implicit bias of training algorithms. In this paper, we present a novel analysis of single-hidden-layer linear networks trained under gradient flow, which connects initialization, optimization, and overparametrization. Firstly, we show that the squared loss converges exponentially to its optimum at a rate that depends on the level of imbalance of the initialization. Secondly, we show that proper initialization constrains the dynamics of the network parameters to lie within an invariant set. In turn, minimizing the loss over this set leads to the min-norm solution. Finally, we show that large hidden layer width, together with (properly scaled) random initialization, ensures proximity to such an invariant set during training, allowing us to derive a novel non-asymptotic upper-bound on the distance between the trained network and the min-norm solution.", "bibtex": "@InProceedings{pmlr-v139-min21c,\n title = \t {On the Explicit Role of Initialization on the Convergence and Implicit Bias of Overparametrized Linear Networks},\n author = {Min, Hancheng and Tarmoun, Salma and Vidal, Rene and Mallada, Enrique},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7760--7768},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/min21c/min21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/min21c.html},\n abstract = \t {Neural networks trained via gradient descent with random initialization and without any regularization enjoy good generalization performance in practice despite being highly overparametrized. 
A promising direction to explain this phenomenon is to study how initialization and overparametrization affect convergence and implicit bias of training algorithms. In this paper, we present a novel analysis of single-hidden-layer linear networks trained under gradient flow, which connects initialization, optimization, and overparametrization. Firstly, we show that the squared loss converges exponentially to its optimum at a rate that depends on the level of imbalance of the initialization. Secondly, we show that proper initialization constrains the dynamics of the network parameters to lie within an invariant set. In turn, minimizing the loss over this set leads to the min-norm solution. Finally, we show that large hidden layer width, together with (properly scaled) random initialization, ensures proximity to such an invariant set during training, allowing us to derive a novel non-asymptotic upper-bound on the distance between the trained network and the min-norm solution.}\n}", "pdf": "http://proceedings.mlr.press/v139/min21c/min21c.pdf", "supp": "", "pdf_size": 533063, "gs_citation": 61, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11706884624461234169&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Mathematical Institute for Data Science+Department of Electrical and Computer Engineering; Mathematical Institute for Data Science+Department of Applied Mathematics and Statistics; Mathematical Institute for Data Science+Department of Biomedical Engineering; Mathematical Institute for Data Science+Department of Electrical and Computer Engineering", "aff_domain": "jhu.edu; ; ; ", "email": "jhu.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/min21c.html", "aff_unique_index": "0+1;0+2;0+3;0+1", "aff_unique_norm": "Mathematical Institute for Data Science;Unknown Institution;Department of Applied Mathematics and Statistics;Department of Biomedical Engineering", "aff_unique_dep": "Data Science;Department of Electrical and Computer Engineering;Applied Mathematics and Statistics;Biomedical Engineering", "aff_unique_url": ";;;", "aff_unique_abbr": ";;;", "aff_campus_unique_index": ";;;", "aff_campus_unique": "", "aff_country_unique_index": ";;;", "aff_country_unique": "" }, { "title": "On the Generalization Power of Overfitted Two-Layer Neural Tangent Kernel Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8703", "id": "8703", "proceeding": "http://proceedings.mlr.press/v139/ju21a.html", "slides": "", "author_site": "Peizhong Ju, Xiaojun Lin, Ness Shroff", "author": "Peizhong Ju; Xiaojun Lin; Ness Shroff", "abstract": "In this paper, we study the generalization performance of min $\\ell_2$-norm overfitting solutions for the neural tangent kernel (NTK) model of a two-layer neural network with ReLU activation that has no bias term. We show that, depending on the ground-truth function, the test error of overfitted NTK models exhibits characteristics that are different from the \"double-descent\" of other overparameterized linear models with simple Fourier or Gaussian features. Specifically, for a class of learnable functions, we provide a new upper bound of the generalization error that approaches a small limiting value, even when the number of neurons $p$ approaches infinity. This limiting value further decreases with the number of training samples $n$. 
For functions outside of this class, we provide a lower bound on the generalization error that does not diminish to zero even when $n$ and $p$ are both large.", "bibtex": "@InProceedings{pmlr-v139-ju21a,\n title = \t {On the Generalization Power of Overfitted Two-Layer Neural Tangent Kernel Models},\n author = {Ju, Peizhong and Lin, Xiaojun and Shroff, Ness},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5137--5147},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ju21a/ju21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ju21a.html},\n abstract = \t {In this paper, we study the generalization performance of min $\\ell_2$-norm overfitting solutions for the neural tangent kernel (NTK) model of a two-layer neural network with ReLU activation that has no bias term. We show that, depending on the ground-truth function, the test error of overfitted NTK models exhibits characteristics that are different from the \"double-descent\" of other overparameterized linear models with simple Fourier or Gaussian features. Specifically, for a class of learnable functions, we provide a new upper bound of the generalization error that approaches a small limiting value, even when the number of neurons $p$ approaches infinity. This limiting value further decreases with the number of training samples $n$. For functions outside of this class, we provide a lower bound on the generalization error that does not diminish to zero even when $n$ and $p$ are both large.}\n}", "pdf": "http://proceedings.mlr.press/v139/ju21a/ju21a.pdf", "supp": "", "pdf_size": 545009, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4839571188012368882&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "School of Electrical and Computer Engineering, Purdue University, West Lafayette, Indiana, USA; School of Electrical and Computer Engineering, Purdue University, West Lafayette, Indiana, USA; Department of ECE and CSE, The Ohio State University, Columbus, Ohio, USA", "aff_domain": "purdue.edu;purdue.edu; ", "email": "purdue.edu;purdue.edu; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/ju21a.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "Purdue University;Ohio State University", "aff_unique_dep": "School of Electrical and Computer Engineering;Department of ECE and CSE", "aff_unique_url": "https://www.purdue.edu;https://www.osu.edu", "aff_unique_abbr": "Purdue;OSU", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "West Lafayette;Columbus", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "On the Implicit Bias of Initialization Shape: Beyond Infinitesimal Mirror Descent", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10387", "id": "10387", "proceeding": "http://proceedings.mlr.press/v139/azulay21a.html", "slides": "", "author_site": "Shahar Azulay, Edward Moroshko, Mor Shpigel Nacson, Blake Woodworth, Nati Srebro, Amir Globerson, Daniel Soudry", "author": "Shahar Azulay; Edward Moroshko; Mor Shpigel Nacson; Blake E Woodworth; Nathan Srebro; Amir Globerson; Daniel Soudry", "abstract": "Recent work has highlighted the role of initialization scale in determining the structure of the solutions that gradient methods 
converge to. In particular, it was shown that large initialization leads to the neural tangent kernel regime solution, whereas small initialization leads to so called \u201crich regimes\u201d. However, the initialization structure is richer than the overall scale alone and involves relative magnitudes of different weights and layers in the network. Here we show that these relative scales, which we refer to as initialization shape, play an important role in determining the learned model. We develop a novel technique for deriving the inductive bias of gradient-flow and use it to obtain closed-form implicit regularizers for multiple cases of interest.", "bibtex": "@InProceedings{pmlr-v139-azulay21a,\n title = \t {On the Implicit Bias of Initialization Shape: Beyond Infinitesimal Mirror Descent},\n author = {Azulay, Shahar and Moroshko, Edward and Nacson, Mor Shpigel and Woodworth, Blake E and Srebro, Nathan and Globerson, Amir and Soudry, Daniel},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {468--477},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/azulay21a/azulay21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/azulay21a.html},\n abstract = \t {Recent work has highlighted the role of initialization scale in determining the structure of the solutions that gradient methods converge to. In particular, it was shown that large initialization leads to the neural tangent kernel regime solution, whereas small initialization leads to so called \u201crich regimes\u201d. However, the initialization structure is richer than the overall scale alone and involves relative magnitudes of different weights and layers in the network. Here we show that these relative scales, which we refer to as initialization shape, play an important role in determining the learned model. 
We develop a novel technique for deriving the inductive bias of gradient-flow and use it to obtain closed-form implicit regularizers for multiple cases of interest.}\n}", "pdf": "http://proceedings.mlr.press/v139/azulay21a/azulay21a.pdf", "supp": "", "pdf_size": 802241, "gs_citation": 97, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14096071310769448211&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "The Blavatnik School of Computer Science, Tel Aviv University; Technion - Israel Institute of Technology; Technion - Israel Institute of Technology; Toyota Technological Institute at Chicago; Toyota Technological Institute at Chicago; The Blavatnik School of Computer Science, Tel Aviv University; Technion - Israel Institute of Technology", "aff_domain": "mail.tau.ac.il;gmail.com;gmail.com;ttic.edu;ttic.edu;gmail.com;gmail.com", "email": "mail.tau.ac.il;gmail.com;gmail.com;ttic.edu;ttic.edu;gmail.com;gmail.com", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/azulay21a.html", "aff_unique_index": "0;1;1;2;2;0;1", "aff_unique_norm": "Tel Aviv University;Technion - Israel Institute of Technology;Toyota Technological Institute at Chicago", "aff_unique_dep": "Blavatnik School of Computer Science;;", "aff_unique_url": "https://www.tau.ac.il;https://www.technion.ac.il/en/;https://www.tti-chicago.org", "aff_unique_abbr": "TAU;Technion;TTI Chicago", "aff_campus_unique_index": "0;2;2;0", "aff_campus_unique": "Tel Aviv;;Chicago", "aff_country_unique_index": "0;0;0;1;1;0;0", "aff_country_unique": "Israel;United States" }, { "title": "On the Inherent Regularization Effects of Noise Injection During Training", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9413", "id": "9413", "proceeding": "http://proceedings.mlr.press/v139/dhifallah21a.html", "slides": "", "author_site": "Oussama Dhifallah, Yue Lu", "author": "Oussama Dhifallah; Yue Lu", "abstract": "Randomly perturbing networks during the training process is a commonly used approach to improving generalization performance. In this paper, we present a theoretical study of one particular way of random perturbation, which corresponds to injecting artificial noise to the training data. We provide a precise asymptotic characterization of the training and generalization errors of such randomly perturbed learning problems on a random feature model. Our analysis shows that Gaussian noise injection in the training process is equivalent to introducing a weighted ridge regularization, when the number of noise injections tends to infinity. The explicit form of the regularization is also given. Numerical results corroborate our asymptotic predictions, showing that they are accurate even in moderate problem dimensions. 
Our theoretical predictions are based on a new correlated Gaussian equivalence conjecture that generalizes recent results in the study of random feature models.", "bibtex": "@InProceedings{pmlr-v139-dhifallah21a,\n title = \t {On the Inherent Regularization Effects of Noise Injection During Training},\n author = {Dhifallah, Oussama and Lu, Yue},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2665--2675},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/dhifallah21a/dhifallah21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/dhifallah21a.html},\n abstract = \t {Randomly perturbing networks during the training process is a commonly used approach to improving generalization performance. In this paper, we present a theoretical study of one particular way of random perturbation, which corresponds to injecting artificial noise to the training data. We provide a precise asymptotic characterization of the training and generalization errors of such randomly perturbed learning problems on a random feature model. Our analysis shows that Gaussian noise injection in the training process is equivalent to introducing a weighted ridge regularization, when the number of noise injections tends to infinity. The explicit form of the regularization is also given. Numerical results corroborate our asymptotic predictions, showing that they are accurate even in moderate problem dimensions. Our theoretical predictions are based on a new correlated Gaussian equivalence conjecture that generalizes recent results in the study of random feature models.}\n}", "pdf": "http://proceedings.mlr.press/v139/dhifallah21a/dhifallah21a.pdf", "supp": "", "pdf_size": 827853, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10143941375966904780&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "John A. Paulson School of Engineering and Applied Sciences, Harvard University; John A. Paulson School of Engineering and Applied Sciences, Harvard University", "aff_domain": "g.harvard.edu; ", "email": "g.harvard.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/dhifallah21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Harvard University", "aff_unique_dep": "John A. Paulson School of Engineering and Applied Sciences", "aff_unique_url": "https://www.harvard.edu", "aff_unique_abbr": "Harvard", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "On the Optimality of Batch Policy Optimization Algorithms", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9753", "id": "9753", "proceeding": "http://proceedings.mlr.press/v139/xiao21b.html", "slides": "", "author_site": "Chenjun Xiao, Yifan Wu, Jincheng Mei, Bo Dai, Tor Lattimore, Lihong Li, Csaba Szepesvari, Dale Schuurmans", "author": "Chenjun Xiao; Yifan Wu; Jincheng Mei; Bo Dai; Tor Lattimore; Lihong Li; Csaba Szepesvari; Dale Schuurmans", "abstract": "Batch policy optimization considers leveraging existing data for policy construction before interacting with an environment. Although interest in this problem has grown significantly in recent years, its theoretical foundations remain under-developed. 
To advance the understanding of this problem, we provide three results that characterize the limits and possibilities of batch policy optimization in the finite-armed stochastic bandit setting. First, we introduce a class of confidence-adjusted index algorithms that unifies optimistic and pessimistic principles in a common framework, which enables a general analysis. For this family, we show that any confidence-adjusted index algorithm is minimax optimal, whether it be optimistic, pessimistic or neutral. Our analysis reveals that instance-dependent optimality, commonly used to establish optimality of on-line stochastic bandit algorithms, cannot be achieved by any algorithm in the batch setting. In particular, for any algorithm that performs optimally in some environment, there exists another environment where the same algorithm suffers arbitrarily larger regret. Therefore, to establish a framework for distinguishing algorithms, we introduce a new weighted-minimax criterion that considers the inherent difficulty of optimal value prediction. We demonstrate how this criterion can be used to justify commonly used pessimistic principles for batch policy optimization.", "bibtex": "@InProceedings{pmlr-v139-xiao21b,\n title = \t {On the Optimality of Batch Policy Optimization Algorithms},\n author = {Xiao, Chenjun and Wu, Yifan and Mei, Jincheng and Dai, Bo and Lattimore, Tor and Li, Lihong and Szepesvari, Csaba and Schuurmans, Dale},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11362--11371},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xiao21b/xiao21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/xiao21b.html},\n abstract = \t {Batch policy optimization considers leveraging existing data for policy construction before interacting with an environment. Although interest in this problem has grown significantly in recent years, its theoretical foundations remain under-developed. To advance the understanding of this problem, we provide three results that characterize the limits and possibilities of batch policy optimization in the finite-armed stochastic bandit setting. First, we introduce a class of confidence-adjusted index algorithms that unifies optimistic and pessimistic principles in a common framework, which enables a general analysis. For this family, we show that any confidence-adjusted index algorithm is minimax optimal, whether it be optimistic, pessimistic or neutral. Our analysis reveals that instance-dependent optimality, commonly used to establish optimality of on-line stochastic bandit algorithms, cannot be achieved by any algorithm in the batch setting. In particular, for any algorithm that performs optimally in some environment, there exists another environment where the same algorithm suffers arbitrarily larger regret. Therefore, to establish a framework for distinguishing algorithms, we introduce a new weighted-minimax criterion that considers the inherent difficulty of optimal value prediction. 
We demonstrate how this criterion can be used to justify commonly used pessimistic principles for batch policy optimization.}\n}", "pdf": "http://proceedings.mlr.press/v139/xiao21b/xiao21b.pdf", "supp": "", "pdf_size": 3757283, "gs_citation": 37, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9170095676042993036&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "University of Alberta + Google Research; Carnegie Mellon University; DeepMind; Google Research, Brain Team + Amazon; University of Alberta + Google Research; Google Research; University of Alberta + DeepMind; University of Alberta + Google Research", "aff_domain": "ualberta.ca;andrew.cmu.edu; ; ; ; ; ; ", "email": "ualberta.ca;andrew.cmu.edu; ; ; ; ; ; ", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/xiao21b.html", "aff_unique_index": "0+1;2;3;1+4;0+1;1;0+3;0+1", "aff_unique_norm": "University of Alberta;Google;Carnegie Mellon University;DeepMind;Amazon", "aff_unique_dep": ";Google Research;;;Amazon.com, Inc.", "aff_unique_url": "https://www.ualberta.ca;https://research.google;https://www.cmu.edu;https://deepmind.com;https://www.amazon.com", "aff_unique_abbr": "UAlberta;Google Research;CMU;DeepMind;Amazon", "aff_campus_unique_index": "1;1;1;1;;1", "aff_campus_unique": ";Mountain View", "aff_country_unique_index": "0+1;1;2;1+1;0+1;1;0+2;0+1", "aff_country_unique": "Canada;United States;United Kingdom" }, { "title": "On the Power of Localized Perceptron for Label-Optimal Learning of Halfspaces with Adversarial Noise", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9039", "id": "9039", "proceeding": "http://proceedings.mlr.press/v139/shen21a.html", "slides": "", "author": "Jie Shen", "abstract": "We study {\\em online} active learning of homogeneous halfspaces in $\\mathbb{R}^d$ with adversarial noise where the overall probability of a noisy label is constrained to be at most $\\nu$. Our main contribution is a Perceptron-like online active learning algorithm that runs in polynomial time, and under the conditions that the marginal distribution is isotropic log-concave and $\\nu = \\Omega(\\epsilon)$, where $\\epsilon \\in (0, 1)$ is the target error rate, our algorithm PAC learns the underlying halfspace with near-optimal label complexity of $\\tilde{O}\\big(d \\cdot \\polylog(\\frac{1}{\\epsilon})\\big)$ and sample complexity of $\\tilde{O}\\big(\\frac{d}{\\epsilon} \\big)$. Prior to this work, existing online algorithms designed for tolerating the adversarial noise are subject to either label complexity polynomial in $\\frac{1}{\\epsilon}$, or suboptimal noise tolerance, or restrictive marginal distributions. With the additional prior knowledge that the underlying halfspace is $s$-sparse, we obtain attribute-efficient label complexity of $\\tilde{O}\\big( s \\cdot \\polylog(d, \\frac{1}{\\epsilon}) \\big)$ and sample complexity of $\\tilde{O}\\big(\\frac{s}{\\epsilon} \\cdot \\polylog(d) \\big)$. 
As an immediate corollary, we show that under the agnostic model where no assumption is made on the noise rate $\\nu$, our active learner achieves an error rate of $O(OPT) + \\epsilon$ with the same running time and label and sample complexity, where $OPT$ is the best possible error rate achievable by any homogeneous halfspace.", "bibtex": "@InProceedings{pmlr-v139-shen21a,\n title = \t {On the Power of Localized Perceptron for Label-Optimal Learning of Halfspaces with Adversarial Noise},\n author = {Shen, Jie},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9503--9514},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/shen21a/shen21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/shen21a.html},\n abstract = \t {We study {\\em online} active learning of homogeneous halfspaces in $\\mathbb{R}^d$ with adversarial noise where the overall probability of a noisy label is constrained to be at most $\\nu$. Our main contribution is a Perceptron-like online active learning algorithm that runs in polynomial time, and under the conditions that the marginal distribution is isotropic log-concave and $\\nu = \\Omega(\\epsilon)$, where $\\epsilon \\in (0, 1)$ is the target error rate, our algorithm PAC learns the underlying halfspace with near-optimal label complexity of $\\tilde{O}\\big(d \\cdot \\polylog(\\frac{1}{\\epsilon})\\big)$ and sample complexity of $\\tilde{O}\\big(\\frac{d}{\\epsilon} \\big)$. Prior to this work, existing online algorithms designed for tolerating the adversarial noise are subject to either label complexity polynomial in $\\frac{1}{\\epsilon}$, or suboptimal noise tolerance, or restrictive marginal distributions. With the additional prior knowledge that the underlying halfspace is $s$-sparse, we obtain attribute-efficient label complexity of $\\tilde{O}\\big( s \\cdot \\polylog(d, \\frac{1}{\\epsilon}) \\big)$ and sample complexity of $\\tilde{O}\\big(\\frac{s}{\\epsilon} \\cdot \\polylog(d) \\big)$. 
As an immediate corollary, we show that under the agnostic model where no assumption is made on the noise rate $\\nu$, our active learner achieves an error rate of $O(OPT) + \\epsilon$ with the same running time and label and sample complexity, where $OPT$ is the best possible error rate achievable by any homogeneous halfspace.}\n}", "pdf": "http://proceedings.mlr.press/v139/shen21a/shen21a.pdf", "supp": "", "pdf_size": 342778, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16972866778565071319&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Stevens Institute of Technology", "aff_domain": "stevens.edu", "email": "stevens.edu", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v139/shen21a.html", "aff_unique_index": "0", "aff_unique_norm": "Stevens Institute of Technology", "aff_unique_dep": "", "aff_unique_url": "https://www.stevens.edu", "aff_unique_abbr": "SIT", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "title": "On the Predictability of Pruning Across Scales", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9937", "id": "9937", "proceeding": "http://proceedings.mlr.press/v139/rosenfeld21a.html", "slides": "", "author_site": "Jonathan Rosenfeld, Jonathan Frankle, Michael Carbin, Nir Shavit", "author": "Jonathan S Rosenfeld; Jonathan Frankle; Michael Carbin; Nir Shavit", "abstract": "We show that the error of iteratively magnitude-pruned networks empirically follows a scaling law with interpretable coefficients that depend on the architecture and task. We functionally approximate the error of the pruned networks, showing it is predictable in terms of an invariant tying width, depth, and pruning level, such that networks of vastly different pruned densities are interchangeable. We demonstrate the accuracy of this approximation over orders of magnitude in depth, width, dataset size, and density. We show that the functional form holds (generalizes) for large scale data (e.g., ImageNet) and architectures (e.g., ResNets). As neural networks become ever larger and costlier to train, our findings suggest a framework for reasoning conceptually and analytically about a standard method for unstructured pruning.", "bibtex": "@InProceedings{pmlr-v139-rosenfeld21a,\n title = \t {On the Predictability of Pruning Across Scales},\n author = {Rosenfeld, Jonathan S and Frankle, Jonathan and Carbin, Michael and Shavit, Nir},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9075--9083},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/rosenfeld21a/rosenfeld21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/rosenfeld21a.html},\n abstract = \t {We show that the error of iteratively magnitude-pruned networks empirically follows a scaling law with interpretable coefficients that depend on the architecture and task. We functionally approximate the error of the pruned networks, showing it is predictable in terms of an invariant tying width, depth, and pruning level, such that networks of vastly different pruned densities are interchangeable. We demonstrate the accuracy of this approximation over orders of magnitude in depth, width, dataset size, and density. 
We show that the functional form holds (generalizes) for large scale data (e.g., ImageNet) and architectures (e.g., ResNets). As neural networks become ever larger and costlier to train, our findings suggest a framework for reasoning conceptually and analytically about a standard method for unstructured pruning.}\n}", "pdf": "http://proceedings.mlr.press/v139/rosenfeld21a/rosenfeld21a.pdf", "supp": "", "pdf_size": 1377915, "gs_citation": 42, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9584421335074848810&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "MIT CSAIL; MIT CSAIL; MIT CSAIL; MIT CSAIL", "aff_domain": "csail.mit.edu; ; ; ", "email": "csail.mit.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/rosenfeld21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory", "aff_unique_url": "https://www.csail.mit.edu", "aff_unique_abbr": "MIT CSAIL", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "On the Problem of Underranking in Group-Fair Ranking", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8785", "id": "8785", "proceeding": "http://proceedings.mlr.press/v139/gorantla21a.html", "slides": "", "author_site": "Sruthi Gorantla, Amit Jayant Deshpande, Anand Louis", "author": "Sruthi Gorantla; Amit Deshpande; Anand Louis", "abstract": "Bias in ranking systems, especially among the top ranks, can worsen social and economic inequalities, polarize opinions, and reinforce stereotypes. On the other hand, a bias correction for minority groups can cause more harm if perceived as favoring group-fair outcomes over meritocracy. Most group-fair ranking algorithms post-process a given ranking and output a group-fair ranking. In this paper, we formulate the problem of underranking in group-fair rankings based on how close the group-fair rank of each item is to its original rank, and prove a lower bound on the trade-off achievable for simultaneous underranking and group fairness in ranking. We give a fair ranking algorithm that takes any given ranking and outputs another ranking with simultaneous underranking and group fairness guarantees comparable to the lower bound we prove. Our experimental results confirm the theoretical trade-off between underranking and group fairness, and also show that our algorithm achieves the best of both when compared to the state-of-the-art baselines.", "bibtex": "@InProceedings{pmlr-v139-gorantla21a,\n title = \t {On the Problem of Underranking in Group-Fair Ranking},\n author = {Gorantla, Sruthi and Deshpande, Amit and Louis, Anand},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3777--3787},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gorantla21a/gorantla21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/gorantla21a.html},\n abstract = \t {Bias in ranking systems, especially among the top ranks, can worsen social and economic inequalities, polarize opinions, and reinforce stereotypes. 
On the other hand, a bias correction for minority groups can cause more harm if perceived as favoring group-fair outcomes over meritocracy. Most group-fair ranking algorithms post-process a given ranking and output a group-fair ranking. In this paper, we formulate the problem of underranking in group-fair rankings based on how close the group-fair rank of each item is to its original rank, and prove a lower bound on the trade-off achievable for simultaneous underranking and group fairness in ranking. We give a fair ranking algorithm that takes any given ranking and outputs another ranking with simultaneous underranking and group fairness guarantees comparable to the lower bound we prove. Our experimental results confirm the theoretical trade-off between underranking and group fairness, and also show that our algorithm achieves the best of both when compared to the state-of-the-art baselines.}\n}", "pdf": "http://proceedings.mlr.press/v139/gorantla21a/gorantla21a.pdf", "supp": "", "pdf_size": 2797165, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15412568586111712326&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science and Automation, Indian Institute of Science, Bangalore, India; Microsoft Research, Bangalore, India; Department of Computer Science and Automation, Indian Institute of Science, Bangalore, India", "aff_domain": "iisc.ac.in; ; ", "email": "iisc.ac.in; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/gorantla21a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Indian Institute of Science;Microsoft", "aff_unique_dep": "Department of Computer Science and Automation;Microsoft Research", "aff_unique_url": "https://www.iisc.ac.in;https://www.microsoft.com/en-us/research/group/microsoft-research-india", "aff_unique_abbr": "IISc;MSR", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Bangalore", "aff_country_unique_index": "0;0;0", "aff_country_unique": "India" }, { "title": "On the Proof of Global Convergence of Gradient Descent for Deep ReLU Networks with Linear Widths", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8855", "id": "8855", "proceeding": "http://proceedings.mlr.press/v139/nguyen21a.html", "slides": "", "author": "Quynh Nguyen", "abstract": "We give a simple proof for the global convergence of gradient descent in training deep ReLU networks with the standard square loss, and show some of its improvements over the state-of-the-art. In particular, while prior works require all the hidden layers to be wide with width at least $\\Omega(N^8)$ ($N$ being the number of training samples), we require a single wide layer of linear, quadratic or cubic width depending on the type of initialization. Unlike many recent proofs based on the Neural Tangent Kernel (NTK), our proof need not track the evolution of the entire NTK matrix, or more generally, any quantities related to the changes of activation patterns during training. Instead, we only need to track the evolution of the output at the last hidden layer, which can be done much more easily thanks to the Lipschitz property of ReLU. 
Some highlights of our setting: (i) all the layers are trained with standard gradient descent, (ii) the network has standard parameterization as opposed to the NTK one, and (iii) the network has a single wide layer as opposed to having all wide hidden layers as in most of NTK-related results.", "bibtex": "@InProceedings{pmlr-v139-nguyen21a,\n title = \t {On the Proof of Global Convergence of Gradient Descent for Deep ReLU Networks with Linear Widths},\n author = {Nguyen, Quynh},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8056--8062},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nguyen21a/nguyen21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/nguyen21a.html},\n abstract = \t {We give a simple proof for the global convergence of gradient descent in training deep ReLU networks with the standard square loss, and show some of its improvements over the state-of-the-art. In particular, while prior works require all the hidden layers to be wide with width at least $\\Omega(N^8)$ ($N$ being the number of training samples), we require a single wide layer of linear, quadratic or cubic width depending on the type of initialization. Unlike many recent proofs based on the Neural Tangent Kernel (NTK), our proof need not track the evolution of the entire NTK matrix, or more generally, any quantities related to the changes of activation patterns during training. Instead, we only need to track the evolution of the output at the last hidden layer, which can be done much more easily thanks to the Lipschitz property of ReLU. Some highlights of our setting: (i) all the layers are trained with standard gradient descent, (ii) the network has standard parameterization as opposed to the NTK one, and (iii) the network has a single wide layer as opposed to having all wide hidden layers as in most of NTK-related results.}\n}", "pdf": "http://proceedings.mlr.press/v139/nguyen21a/nguyen21a.pdf", "supp": "", "pdf_size": 268493, "gs_citation": 63, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4831976088331291011&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "MPI-MIS, Germany", "aff_domain": "mis.mpg.de", "email": "mis.mpg.de", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v139/nguyen21a.html", "aff_unique_index": "0", "aff_unique_norm": "Max Planck Institute for Mathematics in the Sciences", "aff_unique_dep": "", "aff_unique_url": "https://www.mis.mpg.de", "aff_unique_abbr": "MPI-MIS", "aff_country_unique_index": "0", "aff_country_unique": "Germany" }, { "title": "On the Random Conjugate Kernel and Neural Tangent Kernel", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8463", "id": "8463", "proceeding": "http://proceedings.mlr.press/v139/hu21b.html", "slides": "/media/icml-2021/Slides/8463.pdf", "author_site": "Zhengmian Hu, Heng Huang", "author": "Zhengmian Hu; Heng Huang", "abstract": "We investigate the distributions of Conjugate Kernel (CK) and Neural Tangent Kernel (NTK) for ReLU networks with random initialization. We derive the precise distributions and moments of the diagonal elements of these kernels. 
For a feedforward network, these values converge in law to a log-normal distribution when the network depth $d$ and width $n$ simultaneously tend to infinity and the variance of log diagonal elements is proportional to ${d}/{n}$. For the residual network, in the limit that number of branches $m$ increases to infinity and the width $n$ remains fixed, the diagonal elements of Conjugate Kernel converge in law to a log-normal distribution where the variance of log value is proportional to ${1}/{n}$, and the diagonal elements of NTK converge in law to a log-normal distributed variable times the conjugate kernel of one feedforward network. Our new theoretical analysis results suggest that residual network remains trainable in the limit of infinite branches and fixed network width. The numerical experiments are conducted and all results validate the soundness of our theoretical analysis.", "bibtex": "@InProceedings{pmlr-v139-hu21b,\n title = \t {On the Random Conjugate Kernel and Neural Tangent Kernel},\n author = {Hu, Zhengmian and Huang, Heng},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4359--4368},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hu21b/hu21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/hu21b.html},\n abstract = \t {We investigate the distributions of Conjugate Kernel (CK) and Neural Tangent Kernel (NTK) for ReLU networks with random initialization. We derive the precise distributions and moments of the diagonal elements of these kernels. For a feedforward network, these values converge in law to a log-normal distribution when the network depth $d$ and width $n$ simultaneously tend to infinity and the variance of log diagonal elements is proportional to ${d}/{n}$. For the residual network, in the limit that number of branches $m$ increases to infinity and the width $n$ remains fixed, the diagonal elements of Conjugate Kernel converge in law to a log-normal distribution where the variance of log value is proportional to ${1}/{n}$, and the diagonal elements of NTK converge in law to a log-normal distributed variable times the conjugate kernel of one feedforward network. Our new theoretical analysis results suggest that residual network remains trainable in the limit of infinite branches and fixed network width. 
The numerical experiments are conducted and all results validate the soundness of our theoretical analysis.}\n}", "pdf": "http://proceedings.mlr.press/v139/hu21b/hu21b.pdf", "supp": "", "pdf_size": 461712, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2602226808358207918&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Electrical and Computer Engineering, University of Pittsburgh, Pittsburgh, PA 15213, USA; Department of Electrical and Computer Engineering, University of Pittsburgh, Pittsburgh, PA 15213, USA", "aff_domain": "gmail.com;gmail.com", "email": "gmail.com;gmail.com", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/hu21b.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Pittsburgh", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.pitt.edu", "aff_unique_abbr": "Pitt", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "On the difficulty of unbiased alpha divergence minimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10337", "id": "10337", "proceeding": "http://proceedings.mlr.press/v139/geffner21a.html", "slides": "", "author_site": "Tomas Geffner, Justin Domke", "author": "Tomas Geffner; Justin Domke", "abstract": "Several approximate inference algorithms have been proposed to minimize an alpha-divergence between an approximating distribution and a target distribution. Many of these algorithms introduce bias, the magnitude of which becomes problematic in high dimensions. Other algorithms are unbiased. These often seem to suffer from high variance, but little is rigorously known. In this work we study unbiased methods for alpha-divergence minimization through the Signal-to-Noise Ratio (SNR) of the gradient estimator. We study several representative scenarios where strong analytical results are possible, such as fully-factorized or Gaussian distributions. We find that when alpha is not zero, the SNR worsens exponentially in the dimensionality of the problem. This casts doubt on the practicality of these methods. We empirically confirm these theoretical results.", "bibtex": "@InProceedings{pmlr-v139-geffner21a,\n title = \t {On the difficulty of unbiased alpha divergence minimization},\n author = {Geffner, Tomas and Domke, Justin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3650--3659},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/geffner21a/geffner21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/geffner21a.html},\n abstract = \t {Several approximate inference algorithms have been proposed to minimize an alpha-divergence between an approximating distribution and a target distribution. Many of these algorithms introduce bias, the magnitude of which becomes problematic in high dimensions. Other algorithms are unbiased. These often seem to suffer from high variance, but little is rigorously known. In this work we study unbiased methods for alpha-divergence minimization through the Signal-to-Noise Ratio (SNR) of the gradient estimator. 
We study several representative scenarios where strong analytical results are possible, such as fully-factorized or Gaussian distributions. We find that when alpha is not zero, the SNR worsens exponentially in the dimensionality of the problem. This casts doubt on the practicality of these methods. We empirically confirm these theoretical results.}\n}", "pdf": "http://proceedings.mlr.press/v139/geffner21a/geffner21a.pdf", "supp": "", "pdf_size": 7785695, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1898215219686981883&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "College of Information and Computer Science, University of Massachusetts, Amherst, MA, USA; College of Information and Computer Science, University of Massachusetts, Amherst, MA, USA", "aff_domain": "cs.umass.edu;cs.umass.edu", "email": "cs.umass.edu;cs.umass.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/geffner21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Massachusetts Amherst", "aff_unique_dep": "College of Information and Computer Science", "aff_unique_url": "https://www.umass.edu", "aff_unique_abbr": "UMass Amherst", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Amherst", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "On the price of explainability for some clustering problems", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8897", "id": "8897", "proceeding": "http://proceedings.mlr.press/v139/laber21a.html", "slides": "/media/icml-2021/Slides/8897.pdf", "author_site": "Eduardo Laber, Lucas Murtinho", "author": "Eduardo S Laber; Lucas Murtinho", "abstract": "The price of explainability for a clustering task can be defined as the unavoidable loss, in terms of the objective function, if we force the final partition to be explainable. Here, we study this price for the following clustering problems: $k$-means, $k$-medians, $k$-centers and maximum-spacing. We provide upper and lower bounds for a natural model where explainability is achieved via decision trees. For the $k$-means and $k$-medians problems our upper bounds improve those obtained by [Dasgupta et. al, ICML 20] for low dimensions. Another contribution is a simple and efficient algorithm for building explainable clusterings for the $k$-means problem. We provide empirical evidence that its performance is better than the current state of the art for decision-tree based explainable clustering.", "bibtex": "@InProceedings{pmlr-v139-laber21a,\n title = \t {On the price of explainability for some clustering problems},\n author = {Laber, Eduardo S and Murtinho, Lucas},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5915--5925},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/laber21a/laber21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/laber21a.html},\n abstract = \t {The price of explainability for a clustering task can be defined as the unavoidable loss, in terms of the objective function, if we force the final partition to be explainable. Here, we study this price for the following clustering problems: $k$-means, $k$-medians, $k$-centers and maximum-spacing. 
We provide upper and lower bounds for a natural model where explainability is achieved via decision trees. For the $k$-means and $k$-medians problems our upper bounds improve those obtained by [Dasgupta et. al, ICML 20] for low dimensions. Another contribution is a simple and efficient algorithm for building explainable clusterings for the $k$-means problem. We provide empirical evidence that its performance is better than the current state of the art for decision-tree based explainable clustering.}\n}", "pdf": "http://proceedings.mlr.press/v139/laber21a/laber21a.pdf", "supp": "", "pdf_size": 461696, "gs_citation": 41, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9569069344864416237&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, PUC-Rio, Brazil; Department of Computer Science, PUC-Rio, Brazil", "aff_domain": "gmail.com; ", "email": "gmail.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/laber21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Pontifical Catholic University of Rio de Janeiro", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.puc-rio.br", "aff_unique_abbr": "PUC-Rio", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Rio de Janeiro", "aff_country_unique_index": "0;0", "aff_country_unique": "Brazil" }, { "title": "On-Off Center-Surround Receptive Fields for Accurate and Robust Image Classification", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10391", "id": "10391", "proceeding": "http://proceedings.mlr.press/v139/babaiee21a.html", "slides": "", "author_site": "Zahra Babaiee, Ramin Hasani, Mathias Lechner, Daniela Rus, Radu Grosu", "author": "Zahra Babaiee; Ramin Hasani; Mathias Lechner; Daniela Rus; Radu Grosu", "abstract": "Robustness to variations in lighting conditions is a key objective for any deep vision system. To this end, our paper extends the receptive field of convolutional neural networks with two residual components, ubiquitous in the visual processing system of vertebrates: On-center and off-center pathways, with an excitatory center and inhibitory surround; OOCS for short. The On-center pathway is excited by the presence of a light stimulus in its center, but not in its surround, whereas the Off-center pathway is excited by the absence of a light stimulus in its center, but not in its surround. We design OOCS pathways via a difference of Gaussians, with their variance computed analytically from the size of the receptive fields. OOCS pathways complement each other in their response to light stimuli, ensuring this way a strong edge-detection capability, and as a result an accurate and robust inference under challenging lighting conditions. 
We provide extensive empirical evidence showing that networks supplied with OOCS pathways gain accuracy and illumination-robustness from the novel edge representation, compared to other baselines.", "bibtex": "@InProceedings{pmlr-v139-babaiee21a,\n title = \t {On-Off Center-Surround Receptive Fields for Accurate and Robust Image Classification},\n author = {Babaiee, Zahra and Hasani, Ramin and Lechner, Mathias and Rus, Daniela and Grosu, Radu},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {478--489},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/babaiee21a/babaiee21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/babaiee21a.html},\n abstract = \t {Robustness to variations in lighting conditions is a key objective for any deep vision system. To this end, our paper extends the receptive field of convolutional neural networks with two residual components, ubiquitous in the visual processing system of vertebrates: On-center and off-center pathways, with an excitatory center and inhibitory surround; OOCS for short. The On-center pathway is excited by the presence of a light stimulus in its center, but not in its surround, whereas the Off-center pathway is excited by the absence of a light stimulus in its center, but not in its surround. We design OOCS pathways via a difference of Gaussians, with their variance computed analytically from the size of the receptive fields. OOCS pathways complement each other in their response to light stimuli, ensuring this way a strong edge-detection capability, and as a result an accurate and robust inference under challenging lighting conditions. 
We provide extensive empirical evidence showing that networks supplied with OOCS pathways gain accuracy and illumination-robustness from the novel edge representation, compared to other baselines.}\n}", "pdf": "http://proceedings.mlr.press/v139/babaiee21a/babaiee21a.pdf", "supp": "", "pdf_size": 4246561, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14788977888396220864&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "CPS, TU Wien; CSAIL, MIT; IST Austria; CSAIL, MIT; CPS, TU Wien", "aff_domain": "tuwien.ac.at; ; ; ; ", "email": "tuwien.ac.at; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/babaiee21a.html", "aff_unique_index": "0;1;2;1;0", "aff_unique_norm": "Technical University of Vienna;Massachusetts Institute of Technology;Institute of Science and Technology Austria", "aff_unique_dep": "Computer Science;Computer Science and Artificial Intelligence Laboratory;", "aff_unique_url": "https://www.tuwien.ac.at;https://www.csail.mit.edu;https://www.ist.ac.at", "aff_unique_abbr": "TU Wien;MIT;IST Austria", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Cambridge", "aff_country_unique_index": "0;1;0;1;0", "aff_country_unique": "Austria;United States" }, { "title": "On-Policy Deep Reinforcement Learning for the Average-Reward Criterion", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10281", "id": "10281", "proceeding": "http://proceedings.mlr.press/v139/zhang21q.html", "slides": "", "author_site": "Yiming Zhang, Keith Ross", "author": "Yiming Zhang; Keith W Ross", "abstract": "We develop theory and algorithms for average-reward on-policy Reinforcement Learning (RL). We first consider bounding the difference of the long-term average reward for two policies. We show that previous work based on the discounted return (Schulman et al. 2015, Achiam et al. 2017) results in a non-meaningful lower bound in the average reward setting. By addressing the average-reward criterion directly, we then derive a novel bound which depends on the average divergence between the policies and on Kemeny\u2019s constant. Based on this bound, we develop an iterative procedure which produces a sequence of monotonically improved policies for the average reward criterion. This iterative procedure can then be combined with classic Deep Reinforcement Learning (DRL) methods, resulting in practical DRL algorithms that target the long-run average reward criterion. In particular, we demonstrate that Average-Reward TRPO (ATRPO), which adapts the on-policy TRPO algorithm to the average-reward criterion, significantly outperforms TRPO in the most challenging MuJuCo environments.", "bibtex": "@InProceedings{pmlr-v139-zhang21q,\n title = \t {On-Policy Deep Reinforcement Learning for the Average-Reward Criterion},\n author = {Zhang, Yiming and Ross, Keith W},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12535--12545},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21q/zhang21q.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21q.html},\n abstract = \t {We develop theory and algorithms for average-reward on-policy Reinforcement Learning (RL). 
We first consider bounding the difference of the long-term average reward for two policies. We show that previous work based on the discounted return (Schulman et al. 2015, Achiam et al. 2017) results in a non-meaningful lower bound in the average reward setting. By addressing the average-reward criterion directly, we then derive a novel bound which depends on the average divergence between the policies and on Kemeny\u2019s constant. Based on this bound, we develop an iterative procedure which produces a sequence of monotonically improved policies for the average reward criterion. This iterative procedure can then be combined with classic Deep Reinforcement Learning (DRL) methods, resulting in practical DRL algorithms that target the long-run average reward criterion. In particular, we demonstrate that Average-Reward TRPO (ATRPO), which adapts the on-policy TRPO algorithm to the average-reward criterion, significantly outperforms TRPO in the most challenging MuJuCo environments.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21q/zhang21q.pdf", "supp": "", "pdf_size": 824474, "gs_citation": 59, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5968841564732339602&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "New York University; New York University Shanghai", "aff_domain": "cs.nyu.edu; ", "email": "cs.nyu.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/zhang21q.html", "aff_unique_index": "0;0", "aff_unique_norm": "New York University", "aff_unique_dep": "", "aff_unique_url": "https://www.nyu.edu", "aff_unique_abbr": "NYU", "aff_campus_unique_index": "1", "aff_campus_unique": ";Shanghai", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "On-the-fly Rectification for Robust Large-Vocabulary Topic Inference", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9807", "id": "9807", "proceeding": "http://proceedings.mlr.press/v139/lee21c.html", "slides": "", "author_site": "Moontae Lee, Sungjun Cho, Kun Dong, David Mimno, David Bindel", "author": "Moontae Lee; Sungjun Cho; Kun Dong; David Mimno; David Bindel", "abstract": "Across many data domains, co-occurrence statistics about the joint appearance of objects are powerfully informative. By transforming unsupervised learning problems into decompositions of co-occurrence statistics, spectral algorithms provide transparent and efficient algorithms for posterior inference such as latent topic analysis and community detection. As object vocabularies grow, however, it becomes rapidly more expensive to store and run inference algorithms on co-occurrence statistics. Rectifying co-occurrence, the key process to uphold model assumptions, becomes increasingly more vital in the presence of rare terms, but current techniques cannot scale to large vocabularies. We propose novel methods that simultaneously compress and rectify co-occurrence statistics, scaling gracefully with the size of vocabulary and the dimension of latent space. 
We also present new algorithms learning latent variables from the compressed statistics, and verify that our methods perform comparably to previous approaches on both textual and non-textual data.", "bibtex": "@InProceedings{pmlr-v139-lee21c,\n title = \t {On-the-fly Rectification for Robust Large-Vocabulary Topic Inference},\n author = {Lee, Moontae and Cho, Sungjun and Dong, Kun and Mimno, David and Bindel, David},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6087--6097},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lee21c/lee21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/lee21c.html},\n abstract = \t {Across many data domains, co-occurrence statistics about the joint appearance of objects are powerfully informative. By transforming unsupervised learning problems into decompositions of co-occurrence statistics, spectral algorithms provide transparent and efficient algorithms for posterior inference such as latent topic analysis and community detection. As object vocabularies grow, however, it becomes rapidly more expensive to store and run inference algorithms on co-occurrence statistics. Rectifying co-occurrence, the key process to uphold model assumptions, becomes increasingly more vital in the presence of rare terms, but current techniques cannot scale to large vocabularies. We propose novel methods that simultaneously compress and rectify co-occurrence statistics, scaling gracefully with the size of vocabulary and the dimension of latent space. We also present new algorithms learning latent variables from the compressed statistics, and verify that our methods perform comparably to previous approaches on both textual and non-textual data.}\n}", "pdf": "http://proceedings.mlr.press/v139/lee21c/lee21c.pdf", "supp": "", "pdf_size": 1181940, "gs_citation": 0, "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:aW_t2b2iPeIJ:scholar.google.com/&scioq=On-the-fly+Rectification+for+Robust+Large-Vocabulary+Topic+Inference&hl=en&as_sdt=0,5", "gs_version_total": 5, "aff": "Information and Decision Sciences, University of Illinois at Chicago, Chicago, Illinois, USA + Microsoft Research at Redmond, Redmond, Washington, USA; Computational Science and Engineering, Georgia Tech, Atlanta, Georgia, USA; Applied Mathematics, Cornell University, Ithaca, New York, USA; Information Science, Cornell University, Ithaca, New York, USA; Computer Science, Cornell University, Ithaca, New York, USA", "aff_domain": "uic.edu; ; ; ; ", "email": "uic.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/lee21c.html", "aff_unique_index": "0+1;2;3;3;3", "aff_unique_norm": "University of Illinois at Chicago;Microsoft;Georgia Institute of Technology;Cornell University", "aff_unique_dep": "Information and Decision Sciences;Research;Computational Science and Engineering;Department of Applied Mathematics", "aff_unique_url": "https://www.uic.edu;https://www.microsoft.com/en-us/research;https://www.gatech.edu;https://www.cornell.edu", "aff_unique_abbr": "UIC;MSR;Georgia Tech;Cornell", "aff_campus_unique_index": "0+1;2;3;3;3", "aff_campus_unique": "Chicago;Redmond;Atlanta;Ithaca", "aff_country_unique_index": "0+0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "One Pass Late Fusion Multi-view 
Clustering", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10273", "id": "10273", "proceeding": "http://proceedings.mlr.press/v139/liu21l.html", "slides": "", "author_site": "Xinwang Liu, Li Liu, Qing Liao, Siwei Wang, Yi Zhang, Wenxuan Tu, Chang Tang, Jiyuan Liu, En Zhu", "author": "Xinwang Liu; Li Liu; Qing Liao; Siwei Wang; Yi Zhang; Wenxuan Tu; Chang Tang; Jiyuan Liu; En Zhu", "abstract": "Existing late fusion multi-view clustering (LFMVC) optimally integrates a group of pre-specified base partition matrices to learn a consensus one. It is then taken as the input of the widely used k-means to generate the cluster labels. As observed, the learning of the consensus partition matrix and the generation of cluster labels are separately done. These two procedures lack necessary negotiation and can not best serve for each other, which may adversely affect the clustering performance. To address this issue, we propose to unify the aforementioned two learning procedures into a single optimization, in which the consensus partition matrix can better serve for the generation of cluster labels, and the latter is able to guide the learning of the former. To optimize the resultant optimization problem, we develop a four-step alternate algorithm with proved convergence. We theoretically analyze the clustering generalization error of the proposed algorithm on unseen data. Comprehensive experiments on multiple benchmark datasets demonstrate the superiority of our algorithm in terms of both clustering accuracy and computational efficiency. It is expected that the simplicity and effectiveness of our algorithm will make it a good option to be considered for practical multi-view clustering applications.", "bibtex": "@InProceedings{pmlr-v139-liu21l,\n title = \t {One Pass Late Fusion Multi-view Clustering},\n author = {Liu, Xinwang and Liu, Li and Liao, Qing and Wang, Siwei and Zhang, Yi and Tu, Wenxuan and Tang, Chang and Liu, Jiyuan and Zhu, En},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6850--6859},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21l/liu21l.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21l.html},\n abstract = \t {Existing late fusion multi-view clustering (LFMVC) optimally integrates a group of pre-specified base partition matrices to learn a consensus one. It is then taken as the input of the widely used k-means to generate the cluster labels. As observed, the learning of the consensus partition matrix and the generation of cluster labels are separately done. These two procedures lack necessary negotiation and can not best serve for each other, which may adversely affect the clustering performance. To address this issue, we propose to unify the aforementioned two learning procedures into a single optimization, in which the consensus partition matrix can better serve for the generation of cluster labels, and the latter is able to guide the learning of the former. To optimize the resultant optimization problem, we develop a four-step alternate algorithm with proved convergence. We theoretically analyze the clustering generalization error of the proposed algorithm on unseen data. 
Comprehensive experiments on multiple benchmark datasets demonstrate the superiority of our algorithm in terms of both clustering accuracy and computational efficiency. It is expected that the simplicity and effectiveness of our algorithm will make it a good option to be considered for practical multi-view clustering applications.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21l/liu21l.pdf", "supp": "", "pdf_size": 410100, "gs_citation": 127, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=145623590493594275&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "School of Computer, National University of Defense Technology; School of Computer, National University of Defense Technology; Department of Computer Science and Technology, Harbin Institute of Technology, Shenzhen; School of Computer, National University of Defense Technology; School of Computer, National University of Defense Technology; School of Computer, National University of Defense Technology; School of Computer Science, China University of Geosciences; School of Computer, National University of Defense Technology; School of Computer, National University of Defense Technology", "aff_domain": "nudt.edu.cn; ; ; ; ; ; ; ; ", "email": "nudt.edu.cn; ; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 9, "oa": "https://proceedings.mlr.press/v139/liu21l.html", "aff_unique_index": "0;0;1;0;0;0;2;0;0", "aff_unique_norm": "National University of Defense Technology;Harbin Institute of Technology;China University of Geosciences", "aff_unique_dep": "School of Computer;Department of Computer Science and Technology;School of Computer Science", "aff_unique_url": "http://www.nudt.edu.cn/;http://www.hit.edu.cn/;http://www.cug.edu.cn", "aff_unique_abbr": "NUDT;HIT;CUG", "aff_campus_unique_index": "1", "aff_campus_unique": ";Shenzhen", "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", "aff_country_unique": "China" }, { "title": "One for One, or All for All: Equilibria and Optimality of Collaboration in Federated Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9419", "id": "9419", "proceeding": "http://proceedings.mlr.press/v139/blum21a.html", "slides": "", "author_site": "Avrim Blum, Nika Haghtalab, Richard Lanas Phillips, Han Shao", "author": "Avrim Blum; Nika Haghtalab; Richard Lanas Phillips; Han Shao", "abstract": "In recent years, federated learning has been embraced as an approach for bringing about collaboration across large populations of learning agents. However, little is known about how collaboration protocols should take agents\u2019 incentives into account when allocating individual resources for communal learning in order to maintain such collaborations. Inspired by game theoretic notions, this paper introduces a framework for incentive-aware learning and data sharing in federated learning. Our stable and envy-free equilibria capture notions of collaboration in the presence of agents interested in meeting their learning objectives while keeping their own sample collection burden low. For example, in an envy-free equilibrium, no agent would wish to swap their sampling burden with any other agent and in a stable equilibrium, no agent would wish to unilaterally reduce their sampling burden. In addition to formalizing this framework, our contributions include characterizing the structural properties of such equilibria, proving when they exist, and showing how they can be computed. 
Furthermore, we compare the sample complexity of incentive-aware collaboration with that of optimal collaboration when one ignores agents\u2019 incentives.", "bibtex": "@InProceedings{pmlr-v139-blum21a,\n title = \t {One for One, or All for All: Equilibria and Optimality of Collaboration in Federated Learning},\n author = {Blum, Avrim and Haghtalab, Nika and Phillips, Richard Lanas and Shao, Han},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1005--1014},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/blum21a/blum21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/blum21a.html},\n abstract = \t {In recent years, federated learning has been embraced as an approach for bringing about collaboration across large populations of learning agents. However, little is known about how collaboration protocols should take agents\u2019 incentives into account when allocating individual resources for communal learning in order to maintain such collaborations. Inspired by game theoretic notions, this paper introduces a framework for incentive-aware learning and data sharing in federated learning. Our stable and envy-free equilibria capture notions of collaboration in the presence of agents interested in meeting their learning objectives while keeping their own sample collection burden low. For example, in an envy-free equilibrium, no agent would wish to swap their sampling burden with any other agent and in a stable equilibrium, no agent would wish to unilaterally reduce their sampling burden. In addition to formalizing this framework, our contributions include characterizing the structural properties of such equilibria, proving when they exist, and showing how they can be computed. 
Furthermore, we compare the sample complexity of incentive-aware collaboration with that of optimal collaboration when one ignores agents\u2019 incentives.}\n}", "pdf": "http://proceedings.mlr.press/v139/blum21a/blum21a.pdf", "supp": "", "pdf_size": 389883, "gs_citation": 64, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3850848411825917524&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Toyota Technological Institute; University of California, Berkeley; Cornell University; Toyota Technological Institute", "aff_domain": "tti.cs.cmu.edu;berkeley.edu;cornell.edu;tti.cs.cmu.edu", "email": "tti.cs.cmu.edu;berkeley.edu;cornell.edu;tti.cs.cmu.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/blum21a.html", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "Toyota Technological Institute;University of California, Berkeley;Cornell University", "aff_unique_dep": ";;", "aff_unique_url": "https://www.tti.ac.jp;https://www.berkeley.edu;https://www.cornell.edu", "aff_unique_abbr": "TTI;UC Berkeley;Cornell", "aff_campus_unique_index": "1", "aff_campus_unique": ";Berkeley", "aff_country_unique_index": "0;1;1;0", "aff_country_unique": "Japan;United States" }, { "title": "One-sided Frank-Wolfe algorithms for saddle problems", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8533", "id": "8533", "proceeding": "http://proceedings.mlr.press/v139/kolmogorov21a.html", "slides": "", "author_site": "Vladimir Kolmogorov, Thomas Pock", "author": "Vladimir Kolmogorov; Thomas Pock", "abstract": "We study a class of convex-concave saddle-point problems of the form $\\min_x\\max_y \u27e8Kx,y\u27e9+f_{\\cal P}(x)-h^*(y)$ where $K$ is a linear operator, $f_{\\cal P}$ is the sum of a convex function $f$ with a Lipschitz-continuous gradient and the indicator function of a bounded convex polytope ${\\cal P}$, and $h^\\ast$ is a convex (possibly nonsmooth) function. Such problem arises, for example, as a Lagrangian relaxation of various discrete optimization problems. Our main assumptions are the existence of an efficient {\\em linear minimization oracle} ($lmo$) for $f_{\\cal P}$ and an efficient {\\em proximal map} ($prox$) for $h^*$ which motivate the solution via a blend of proximal primal-dual algorithms and Frank-Wolfe algorithms. In case $h^*$ is the indicator function of a linear constraint and function $f$ is quadratic, we show a $O(1/n^2)$ convergence rate on the dual objective, requiring $O(n \\log n)$ calls of $lmo$. If the problem comes from the constrained optimization problem $\\min_{x\\in\\mathbb R^d}\\{f_{\\cal P}(x)\\:|\\:Ax-b=0\\}$ then we additionally get bound $O(1/n^2)$ both on the primal gap and on the infeasibility gap. In the most general case, we show a $O(1/n)$ convergence rate of the primal-dual gap again requiring $O(n\\log n)$ calls of $lmo$. To the best of our knowledge, this improves on the known convergence rates for the considered class of saddle-point problems. 
We show applications to labeling problems frequently appearing in machine learning and computer vision.", "bibtex": "@InProceedings{pmlr-v139-kolmogorov21a,\n title = \t {One-sided Frank-Wolfe algorithms for saddle problems},\n author = {Kolmogorov, Vladimir and Pock, Thomas},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5665--5675},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kolmogorov21a/kolmogorov21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kolmogorov21a.html},\n abstract = \t {We study a class of convex-concave saddle-point problems of the form $\\min_x\\max_y \u27e8Kx,y\u27e9+f_{\\cal P}(x)-h^*(y)$ where $K$ is a linear operator, $f_{\\cal P}$ is the sum of a convex function $f$ with a Lipschitz-continuous gradient and the indicator function of a bounded convex polytope ${\\cal P}$, and $h^\\ast$ is a convex (possibly nonsmooth) function. Such problem arises, for example, as a Lagrangian relaxation of various discrete optimization problems. Our main assumptions are the existence of an efficient {\\em linear minimization oracle} ($lmo$) for $f_{\\cal P}$ and an efficient {\\em proximal map} ($prox$) for $h^*$ which motivate the solution via a blend of proximal primal-dual algorithms and Frank-Wolfe algorithms. In case $h^*$ is the indicator function of a linear constraint and function $f$ is quadratic, we show a $O(1/n^2)$ convergence rate on the dual objective, requiring $O(n \\log n)$ calls of $lmo$. If the problem comes from the constrained optimization problem $\\min_{x\\in\\mathbb R^d}\\{f_{\\cal P}(x)\\:|\\:Ax-b=0\\}$ then we additionally get bound $O(1/n^2)$ both on the primal gap and on the infeasibility gap. In the most general case, we show a $O(1/n)$ convergence rate of the primal-dual gap again requiring $O(n\\log n)$ calls of $lmo$. To the best of our knowledge, this improves on the known convergence rates for the considered class of saddle-point problems. 
We show applications to labeling problems frequently appearing in machine learning and computer vision.}\n}", "pdf": "http://proceedings.mlr.press/v139/kolmogorov21a/kolmogorov21a.pdf", "supp": "", "pdf_size": 3033389, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6082373230547871613&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Institute of Science and Technology Austria; Institute of Computer Graphics and Vision, Graz University of Technology", "aff_domain": "ist.ac.at;icg.tugraz.at", "email": "ist.ac.at;icg.tugraz.at", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/kolmogorov21a.html", "aff_unique_index": "0;1", "aff_unique_norm": "Institute of Science and Technology Austria;Graz University of Technology", "aff_unique_dep": ";Institute of Computer Graphics and Vision", "aff_unique_url": "https://www.ist.ac.at;https://www.tugraz.at", "aff_unique_abbr": "IST Austria;TU Graz", "aff_campus_unique_index": "1", "aff_campus_unique": ";Graz", "aff_country_unique_index": "0;0", "aff_country_unique": "Austria" }, { "title": "Oneshot Differentially Private Top-k Selection", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10061", "id": "10061", "proceeding": "http://proceedings.mlr.press/v139/qiao21b.html", "slides": "", "author_site": "Gang Qiao, Weijie Su, Li Zhang", "author": "Gang Qiao; Weijie Su; Li Zhang", "abstract": "Being able to efficiently and accurately select the top-$k$ elements with differential privacy is an integral component of various private data analysis tasks. In this paper, we present the oneshot Laplace mechanism, which generalizes the well-known Report Noisy Max\u00a0\\cite{dwork2014algorithmic} mechanism to reporting noisy top-$k$ elements. We show that the oneshot Laplace mechanism with a noise level of $\\widetilde{O}(\\sqrt{k}/\\eps)$ is approximately differentially private. Compared to the previous peeling approach of running Report Noisy Max $k$ times, the oneshot Laplace mechanism only adds noises and computes the top $k$ elements once, hence much more efficient for large $k$. In addition, our proof of privacy relies on a novel coupling technique that bypasses the composition theorems so without the linear dependence on $k$ which is inherent to various composition theorems. Finally, we present a novel application of efficient top-$k$ selection in the classical problem of ranking from pairwise comparisons.", "bibtex": "@InProceedings{pmlr-v139-qiao21b,\n title = \t {Oneshot Differentially Private Top-k Selection},\n author = {Qiao, Gang and Su, Weijie and Zhang, Li},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8672--8681},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/qiao21b/qiao21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/qiao21b.html},\n abstract = \t {Being able to efficiently and accurately select the top-$k$ elements with differential privacy is an integral component of various private data analysis tasks. In this paper, we present the oneshot Laplace mechanism, which generalizes the well-known Report Noisy Max\u00a0\\cite{dwork2014algorithmic} mechanism to reporting noisy top-$k$ elements. 
We show that the oneshot Laplace mechanism with a noise level of $\\widetilde{O}(\\sqrt{k}/\\eps)$ is approximately differentially private. Compared to the previous peeling approach of running Report Noisy Max $k$ times, the oneshot Laplace mechanism only adds noises and computes the top $k$ elements once, hence much more efficient for large $k$. In addition, our proof of privacy relies on a novel coupling technique that bypasses the composition theorems so without the linear dependence on $k$ which is inherent to various composition theorems. Finally, we present a novel application of efficient top-$k$ selection in the classical problem of ranking from pairwise comparisons.}\n}", "pdf": "http://proceedings.mlr.press/v139/qiao21b/qiao21b.pdf", "supp": "", "pdf_size": 305114, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15444492630669890991&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of Statistics, University of Michigan, Ann Arbor, MI, USA; The Wharton School, University of Pennsylvania, Philadelphia, PA, USA; Google Research, Mountain View, CA, USA", "aff_domain": "umich.edu;wharton.upenn.edu;google.com", "email": "umich.edu;wharton.upenn.edu;google.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/qiao21b.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "University of Michigan;University of Pennsylvania;Google", "aff_unique_dep": "Department of Statistics;The Wharton School;Google Research", "aff_unique_url": "https://www.umich.edu;https://www.wharton.upenn.edu;https://research.google", "aff_unique_abbr": "UM;UPenn;Google", "aff_campus_unique_index": "0;1;2", "aff_campus_unique": "Ann Arbor;Philadelphia;Mountain View", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Online A-Optimal Design and Active Linear Regression", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9369", "id": "9369", "proceeding": "http://proceedings.mlr.press/v139/fontaine21a.html", "slides": "", "author_site": "Xavier Fontaine, Pierre Perrault, Michal Valko, Vianney Perchet", "author": "Xavier Fontaine; Pierre Perrault; Michal Valko; Vianney Perchet", "abstract": "We consider in this paper the problem of optimal experiment design where a decision maker can choose which points to sample to obtain an estimate $\\hat{\\beta}$ of the hidden parameter $\\beta^{\\star}$ of an underlying linear model. The key challenge of this work lies in the heteroscedasticity assumption that we make, meaning that each covariate has a different and unknown variance. The goal of the decision maker is then to figure out on the fly the optimal way to allocate the total budget of $T$ samples between covariates, as sampling several times a specific one will reduce the variance of the estimated model around it (but at the cost of a possible higher variance elsewhere). By trying to minimize the $\\ell^2$-loss $\\mathbb{E} [\\lVert\\hat{\\beta}-\\beta^{\\star}\\rVert^2]$ the decision maker is actually minimizing the trace of the covariance matrix of the problem, which corresponds then to online A-optimal design. Combining techniques from bandit and convex optimization we propose a new active sampling algorithm and we compare it with existing ones. 
We provide theoretical guarantees of this algorithm in different settings, including a $\\mathcal{O}(T^{-2})$ regret bound in the case where the covariates form a basis of the feature space, generalizing and improving existing results. Numerical experiments validate our theoretical findings.", "bibtex": "@InProceedings{pmlr-v139-fontaine21a,\n title = \t {Online A-Optimal Design and Active Linear Regression},\n author = {Fontaine, Xavier and Perrault, Pierre and Valko, Michal and Perchet, Vianney},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3374--3383},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fontaine21a/fontaine21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/fontaine21a.html},\n abstract = \t {We consider in this paper the problem of optimal experiment design where a decision maker can choose which points to sample to obtain an estimate $\\hat{\\beta}$ of the hidden parameter $\\beta^{\\star}$ of an underlying linear model. The key challenge of this work lies in the heteroscedasticity assumption that we make, meaning that each covariate has a different and unknown variance. The goal of the decision maker is then to figure out on the fly the optimal way to allocate the total budget of $T$ samples between covariates, as sampling several times a specific one will reduce the variance of the estimated model around it (but at the cost of a possible higher variance elsewhere). By trying to minimize the $\\ell^2$-loss $\\mathbb{E} [\\lVert\\hat{\\beta}-\\beta^{\\star}\\rVert^2]$ the decision maker is actually minimizing the trace of the covariance matrix of the problem, which corresponds then to online A-optimal design. Combining techniques from bandit and convex optimization we propose a new active sampling algorithm and we compare it with existing ones. We provide theoretical guarantees of this algorithm in different settings, including a $\\mathcal{O}(T^{-2})$ regret bound in the case where the covariates form a basis of the feature space, generalizing and improving existing results. 
Numerical experiments validate our theoretical findings.}\n}", "pdf": "http://proceedings.mlr.press/v139/fontaine21a/fontaine21a.pdf", "supp": "", "pdf_size": 329167, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6114688297477037978&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Centre Borelli, ENS Paris-Saclay, Palaiseau, France; Idemia, Courbevoie, France; Google DeepMind, Paris, France; CREST, ENSAE, Palaiseau, France + Criteo AI Lab, Paris, France", "aff_domain": "polytechnique.edu; ; ; ", "email": "polytechnique.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/fontaine21a.html", "aff_unique_index": "0;1;2;3+4", "aff_unique_norm": "\u00c9cole Normale Sup\u00e9rieure Paris-Saclay;IDEMIA;Google;CREST;Criteo", "aff_unique_dep": "Centre Borelli;;Google DeepMind;;Criteo AI Lab", "aff_unique_url": "https://www.ens-paris-saclay.fr;https://www.idemia.com;https://deepmind.com;;https://www.criteo.com", "aff_unique_abbr": "ENS Paris-Saclay;;DeepMind;;Criteo", "aff_campus_unique_index": "0;2;0+2", "aff_campus_unique": "Palaiseau;;Paris", "aff_country_unique_index": "0;0;0;0+0", "aff_country_unique": "France" }, { "title": "Online Graph Dictionary Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8549", "id": "8549", "proceeding": "http://proceedings.mlr.press/v139/vincent-cuaz21a.html", "slides": "", "author_site": "C\u00e9dric Vincent-Cuaz, Titouan Vayer, R\u00e9mi Flamary, Marco Corneli, Nicolas Courty", "author": "C\u00e9dric Vincent-Cuaz; Titouan Vayer; R\u00e9mi Flamary; Marco Corneli; Nicolas Courty", "abstract": "Dictionary learning is a key tool for representation learning, that explains the data as linear combination of few basic elements. Yet, this analysis is not amenable in the context of graph learning, as graphs usually belong to different metric spaces. We fill this gap by proposing a new online Graph Dictionary Learning approach, which uses the Gromov Wasserstein divergence for the data fitting term. In our work, graphs are encoded through their nodes\u2019 pairwise relations and modeled as convex combination of graph atoms, i.e. dictionary elements, estimated thanks to an online stochastic algorithm, which operates on a dataset of unregistered graphs with potentially different number of nodes. Our approach naturally extends to labeled graphs, and is completed by a novel upper bound that can be used as a fast approximation of Gromov Wasserstein in the embedding space. 
We provide numerical evidences showing the interest of our approach for unsupervised embedding of graph datasets and for online graph subspace estimation and tracking.", "bibtex": "@InProceedings{pmlr-v139-vincent-cuaz21a,\n title = \t {Online Graph Dictionary Learning},\n author = {Vincent-Cuaz, C{\\'e}dric and Vayer, Titouan and Flamary, R{\\'e}mi and Corneli, Marco and Courty, Nicolas},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10564--10574},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/vincent-cuaz21a/vincent-cuaz21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/vincent-cuaz21a.html},\n abstract = \t {Dictionary learning is a key tool for representation learning, that explains the data as linear combination of few basic elements. Yet, this analysis is not amenable in the context of graph learning, as graphs usually belong to different metric spaces. We fill this gap by proposing a new online Graph Dictionary Learning approach, which uses the Gromov Wasserstein divergence for the data fitting term. In our work, graphs are encoded through their nodes\u2019 pairwise relations and modeled as convex combination of graph atoms, i.e. dictionary elements, estimated thanks to an online stochastic algorithm, which operates on a dataset of unregistered graphs with potentially different number of nodes. Our approach naturally extends to labeled graphs, and is completed by a novel upper bound that can be used as a fast approximation of Gromov Wasserstein in the embedding space. We provide numerical evidences showing the interest of our approach for unsupervised embedding of graph datasets and for online graph subspace estimation and tracking.}\n}", "pdf": "http://proceedings.mlr.press/v139/vincent-cuaz21a/vincent-cuaz21a.pdf", "supp": "", "pdf_size": 3674140, "gs_citation": 67, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7527452774562329300&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Univ.C \u02c6ote d\u2019Azur, Inria, CNRS, LJAD, Maasai, Nice, France; ENS de Lyon, LIP UMR 5668, Lyon, France; Ecole Polytechnique, CMAP, UMR 7641, Palaiseau, France; Univ.C \u02c6ote d\u2019Azur, Center of Modeling, Simulation & Interaction, Nice, France; Univ.Bretagne-Sud, CNRS, IRISA, Vannes, France", "aff_domain": "inria.fr; ; ; ; ", "email": "inria.fr; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/vincent-cuaz21a.html", "aff_unique_index": "0;1;2;0;3", "aff_unique_norm": "University of Nice Sophia Antipolis;\u00c9cole Normale Sup\u00e9rieure de Lyon;Ecole Polytechnique;University of Bretagne-Sud", "aff_unique_dep": "Laboratoire Jean-Alexandre Dieudonn\u00e9 (LJAD);LIP UMR 5668;CMAP, UMR 7641;IRISA", "aff_unique_url": "https://www.unice.fr;https://www.ens-lyon.fr;https://www.ecp.fr;https://www.univ-ubs.fr", "aff_unique_abbr": "UNICE;ENS de Lyon;Ecole Polytechnique;UBS", "aff_campus_unique_index": "0;1;2;3;4", "aff_campus_unique": "Sophia Antipolis;Lyon;Palaiseau;Nice;Vannes", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "France" }, { "title": "Online Learning for Load Balancing of Unknown Monotone Resource Allocation Games", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9009", "id": "9009", "proceeding": 
"http://proceedings.mlr.press/v139/bistritz21a.html", "slides": "", "author_site": "Ilai Bistritz, Nicholas Bambos", "author": "Ilai Bistritz; Nicholas Bambos", "abstract": "Consider N players that each uses a mixture of K resources. Each of the players\u2019 reward functions includes a linear pricing term for each resource that is controlled by the game manager. We assume that the game is strongly monotone, so if each player runs gradient descent, the dynamics converge to a unique Nash equilibrium (NE). Unfortunately, this NE can be inefficient since the total load on a given resource can be very high. In principle, we can control the total loads by tuning the coefficients of the pricing terms. However, finding pricing coefficients that balance the loads requires knowing the players\u2019 reward functions and their action sets. Obtaining this game structure information is infeasible in a large-scale network and violates the users\u2019 privacy. To overcome this, we propose a simple algorithm that learns to shift the NE of the game to meet the total load constraints by adjusting the pricing coefficients in an online manner. Our algorithm only requires the total load per resource as feedback and does not need to know the reward functions or the action sets. We prove that our algorithm guarantees convergence in L2 to a NE that meets target total load constraints. Simulations show the effectiveness of our approach when applied to smart grid demand-side management or power control in wireless networks.", "bibtex": "@InProceedings{pmlr-v139-bistritz21a,\n title = \t {Online Learning for Load Balancing of Unknown Monotone Resource Allocation Games},\n author = {Bistritz, Ilai and Bambos, Nicholas},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {968--979},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bistritz21a/bistritz21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bistritz21a.html},\n abstract = \t {Consider N players that each uses a mixture of K resources. Each of the players\u2019 reward functions includes a linear pricing term for each resource that is controlled by the game manager. We assume that the game is strongly monotone, so if each player runs gradient descent, the dynamics converge to a unique Nash equilibrium (NE). Unfortunately, this NE can be inefficient since the total load on a given resource can be very high. In principle, we can control the total loads by tuning the coefficients of the pricing terms. However, finding pricing coefficients that balance the loads requires knowing the players\u2019 reward functions and their action sets. Obtaining this game structure information is infeasible in a large-scale network and violates the users\u2019 privacy. To overcome this, we propose a simple algorithm that learns to shift the NE of the game to meet the total load constraints by adjusting the pricing coefficients in an online manner. Our algorithm only requires the total load per resource as feedback and does not need to know the reward functions or the action sets. We prove that our algorithm guarantees convergence in L2 to a NE that meets target total load constraints. 
Simulations show the effectiveness of our approach when applied to smart grid demand-side management or power control in wireless networks.}\n}", "pdf": "http://proceedings.mlr.press/v139/bistritz21a/bistritz21a.pdf", "supp": "", "pdf_size": 628197, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10949538565178394328&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "Department of Electrical Engineering, Stanford University; Department of Electrical Engineering, Stanford University", "aff_domain": "stanford.edu; ", "email": "stanford.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/bistritz21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Electrical Engineering", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Online Learning in Unknown Markov Games", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9341", "id": "9341", "proceeding": "http://proceedings.mlr.press/v139/tian21b.html", "slides": "", "author_site": "Yi Tian, Yuanhao Wang, Tiancheng Yu, Suvrit Sra", "author": "Yi Tian; Yuanhao Wang; Tiancheng Yu; Suvrit Sra", "abstract": "We study online learning in unknown Markov games, a problem that arises in episodic multi-agent reinforcement learning where the actions of the opponents are unobservable. We show that in this challenging setting, achieving sublinear regret against the best response in hindsight is statistically hard. We then consider a weaker notion of regret by competing with the \\emph{minimax value} of the game, and present an algorithm that achieves a sublinear $\\tilde{\\mathcal{O}}(K^{2/3})$ regret after $K$ episodes. This is the first sublinear regret bound (to our knowledge) for online learning in unknown Markov games. Importantly, our regret bound is independent of the size of the opponents\u2019 action spaces. As a result, even when the opponents\u2019 actions are fully observable, our regret bound improves upon existing analysis (e.g., (Xie et al., 2020)) by an exponential factor in the number of opponents.", "bibtex": "@InProceedings{pmlr-v139-tian21b,\n title = \t {Online Learning in Unknown Markov Games},\n author = {Tian, Yi and Wang, Yuanhao and Yu, Tiancheng and Sra, Suvrit},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10279--10288},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tian21b/tian21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/tian21b.html},\n abstract = \t {We study online learning in unknown Markov games, a problem that arises in episodic multi-agent reinforcement learning where the actions of the opponents are unobservable. We show that in this challenging setting, achieving sublinear regret against the best response in hindsight is statistically hard. We then consider a weaker notion of regret by competing with the \\emph{minimax value} of the game, and present an algorithm that achieves a sublinear $\\tilde{\\mathcal{O}}(K^{2/3})$ regret after $K$ episodes. 
This is the first sublinear regret bound (to our knowledge) for online learning in unknown Markov games. Importantly, our regret bound is independent of the size of the opponents\u2019 action spaces. As a result, even when the opponents\u2019 actions are fully observable, our regret bound improves upon existing analysis (e.g., (Xie et al., 2020)) by an exponential factor in the number of opponents.}\n}", "pdf": "http://proceedings.mlr.press/v139/tian21b/tian21b.pdf", "supp": "", "pdf_size": 399665, "gs_citation": 60, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12262414336385733906&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of EECS, MIT; Department of Computer Science, Princeton University; Department of EECS, MIT; Department of EECS, MIT", "aff_domain": "mit.edu; ; ;mit.edu", "email": "mit.edu; ; ;mit.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/tian21b.html", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "Massachusetts Institute of Technology;Princeton University", "aff_unique_dep": "Department of Electrical Engineering and Computer Science;Department of Computer Science", "aff_unique_url": "https://web.mit.edu;https://www.princeton.edu", "aff_unique_abbr": "MIT;Princeton", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Cambridge;", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Online Learning with Optimism and Delay", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9561", "id": "9561", "proceeding": "http://proceedings.mlr.press/v139/flaspohler21a.html", "slides": "", "author_site": "Genevieve Flaspohler, Francesco Orabona, Judah Cohen, Soukayna Mouatadid, Miruna Oprescu, Paulo Orenstein, Lester Mackey", "author": "Genevieve E Flaspohler; Francesco Orabona; Judah Cohen; Soukayna Mouatadid; Miruna Oprescu; Paulo Orenstein; Lester Mackey", "abstract": "Inspired by the demands of real-time climate and weather forecasting, we develop optimistic online learning algorithms that require no parameter tuning and have optimal regret guarantees under delayed feedback. Our algorithms\u2014DORM, DORM+, and AdaHedgeD\u2014arise from a novel reduction of delayed online learning to optimistic online learning that reveals how optimistic hints can mitigate the regret penalty caused by delay. We pair this delay-as-optimism perspective with a new analysis of optimistic learning that exposes its robustness to hinting errors and a new meta-algorithm for learning effective hinting strategies in the presence of delay. 
We conclude by benchmarking our algorithms on four subseasonal climate forecasting tasks, demonstrating low regret relative to state-of-the-art forecasting models.", "bibtex": "@InProceedings{pmlr-v139-flaspohler21a,\n title = \t {Online Learning with Optimism and Delay},\n author = {Flaspohler, Genevieve E and Orabona, Francesco and Cohen, Judah and Mouatadid, Soukayna and Oprescu, Miruna and Orenstein, Paulo and Mackey, Lester},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3363--3373},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/flaspohler21a/flaspohler21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/flaspohler21a.html},\n abstract = \t {Inspired by the demands of real-time climate and weather forecasting, we develop optimistic online learning algorithms that require no parameter tuning and have optimal regret guarantees under delayed feedback. Our algorithms\u2014DORM, DORM+, and AdaHedgeD\u2014arise from a novel reduction of delayed online learning to optimistic online learning that reveals how optimistic hints can mitigate the regret penalty caused by delay. We pair this delay-as-optimism perspective with a new analysis of optimistic learning that exposes its robustness to hinting errors and a new meta-algorithm for learning effective hinting strategies in the presence of delay. We conclude by benchmarking our algorithms on four subseasonal climate forecasting tasks, demonstrating low regret relative to state-of-the-art forecasting models.}\n}", "pdf": "http://proceedings.mlr.press/v139/flaspohler21a/flaspohler21a.pdf", "supp": "", "pdf_size": 1710750, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3051720071690017995&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": ";;;;;;", "aff_domain": ";;;;;;", "email": ";;;;;;", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/flaspohler21a.html" }, { "title": "Online Limited Memory Neural-Linear Bandits with Likelihood Matching", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9331", "id": "9331", "proceeding": "http://proceedings.mlr.press/v139/nabati21a.html", "slides": "/media/icml-2021/Slides/9331.pdf", "author_site": "Ofir Nabati, Tom Zahavy, Shie Mannor", "author": "Ofir Nabati; Tom Zahavy; Shie Mannor", "abstract": "We study neural-linear bandits for solving problems where {\\em both} exploration and representation learning play an important role. Neural-linear bandits harnesses the representation power of Deep Neural Networks (DNNs) and combines it with efficient exploration mechanisms by leveraging uncertainty estimation of the model, designed for linear contextual bandits on top of the last hidden layer. In order to mitigate the problem of representation change during the process, new uncertainty estimations are computed using stored data from an unlimited buffer. Nevertheless, when the amount of stored data is limited, a phenomenon called catastrophic forgetting emerges. To alleviate this, we propose a likelihood matching algorithm that is resilient to catastrophic forgetting and is completely online. 
We applied our algorithm, Limited Memory Neural-Linear with Likelihood Matching (NeuralLinear-LiM2) on a variety of datasets and observed that our algorithm achieves comparable performance to the unlimited memory approach while exhibits resilience to catastrophic forgetting.", "bibtex": "@InProceedings{pmlr-v139-nabati21a,\n title = \t {Online Limited Memory Neural-Linear Bandits with Likelihood Matching},\n author = {Nabati, Ofir and Zahavy, Tom and Mannor, Shie},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7905--7915},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nabati21a/nabati21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/nabati21a.html},\n abstract = \t {We study neural-linear bandits for solving problems where {\\em both} exploration and representation learning play an important role. Neural-linear bandits harnesses the representation power of Deep Neural Networks (DNNs) and combines it with efficient exploration mechanisms by leveraging uncertainty estimation of the model, designed for linear contextual bandits on top of the last hidden layer. In order to mitigate the problem of representation change during the process, new uncertainty estimations are computed using stored data from an unlimited buffer. Nevertheless, when the amount of stored data is limited, a phenomenon called catastrophic forgetting emerges. To alleviate this, we propose a likelihood matching algorithm that is resilient to catastrophic forgetting and is completely online. We applied our algorithm, Limited Memory Neural-Linear with Likelihood Matching (NeuralLinear-LiM2) on a variety of datasets and observed that our algorithm achieves comparable performance to the unlimited memory approach while exhibits resilience to catastrophic forgetting.}\n}", "pdf": "http://proceedings.mlr.press/v139/nabati21a/nabati21a.pdf", "supp": "", "pdf_size": 848825, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14479099629053634463&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Electrical-Engineering, Technion Institute of Technology, Israel; Department of Electrical-Engineering, Technion Institute of Technology, Israel + DeepMind; Department of Electrical-Engineering, Technion Institute of Technology, Israel + Nvidia Research", "aff_domain": "gmail.com; ; ", "email": "gmail.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/nabati21a.html", "aff_unique_index": "0;0+1;0+2", "aff_unique_norm": "Technion Institute of Technology;DeepMind;NVIDIA", "aff_unique_dep": "Department of Electrical-Engineering;;NVIDIA Research", "aff_unique_url": "https://www.technion.ac.il;https://deepmind.com;https://www.nvidia.com/research", "aff_unique_abbr": "Technion;DeepMind;NVIDIA", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0;0+1;0+2", "aff_country_unique": "Israel;United Kingdom;United States" }, { "title": "Online Optimization in Games via Control Theory: Connecting Regret, Passivity and Poincar\u00e9 Recurrence", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9837", "id": "9837", "proceeding": "http://proceedings.mlr.press/v139/cheung21a.html", "slides": "", "author_site": "Yun Kuen Cheung, Georgios 
Piliouras", "author": "Yun Kuen Cheung; Georgios Piliouras", "abstract": "We present a novel control-theoretic understanding of online optimization and learning in games, via the notion of passivity. Passivity is a fundamental concept in control theory, which abstracts energy conservation and dissipation in physical systems. It has become a standard tool in analysis of general feedback systems, to which game dynamics belong. Our starting point is to show that all continuous-time Follow-the-Regularized-Leader (FTRL) dynamics, which include the well-known Replicator Dynamic, are lossless, i.e. it is passive with no energy dissipation. Interestingly, we prove that passivity implies bounded regret, connecting two fundamental primitives of control theory and online optimization. The observation of energy conservation in FTRL inspires us to present a family of lossless learning dynamics, each of which has an underlying energy function with a simple gradient structure. This family is closed under convex combination; as an immediate corollary, any convex combination of FTRL dynamics is lossless and thus has bounded regret. This allows us to extend the framework of Fox & Shamma [Games 2013] to prove not just global asymptotic stability results for game dynamics, but Poincar{\u00e9} recurrence results as well. Intuitively, when a lossless game (e.g. graphical constant-sum game) is coupled with lossless learning dynamic, their interconnection is also lossless, which results in a pendulum-like energy-preserving recurrent behavior, generalizing Piliouras & Shamma [SODA 2014] and Mertikopoulos et al. [SODA 2018].", "bibtex": "@InProceedings{pmlr-v139-cheung21a,\n title = \t {Online Optimization in Games via Control Theory: Connecting Regret, Passivity and Poincar{\u00e9} Recurrence},\n author = {Cheung, Yun Kuen and Piliouras, Georgios},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1855--1865},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cheung21a/cheung21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/cheung21a.html},\n abstract = \t {We present a novel control-theoretic understanding of online optimization and learning in games, via the notion of passivity. Passivity is a fundamental concept in control theory, which abstracts energy conservation and dissipation in physical systems. It has become a standard tool in analysis of general feedback systems, to which game dynamics belong. Our starting point is to show that all continuous-time Follow-the-Regularized-Leader (FTRL) dynamics, which include the well-known Replicator Dynamic, are lossless, i.e. it is passive with no energy dissipation. Interestingly, we prove that passivity implies bounded regret, connecting two fundamental primitives of control theory and online optimization. The observation of energy conservation in FTRL inspires us to present a family of lossless learning dynamics, each of which has an underlying energy function with a simple gradient structure. This family is closed under convex combination; as an immediate corollary, any convex combination of FTRL dynamics is lossless and thus has bounded regret. 
This allows us to extend the framework of Fox & Shamma [Games 2013] to prove not just global asymptotic stability results for game dynamics, but Poincar{\u00e9} recurrence results as well. Intuitively, when a lossless game (e.g. graphical constant-sum game) is coupled with lossless learning dynamic, their interconnection is also lossless, which results in a pendulum-like energy-preserving recurrent behavior, generalizing Piliouras & Shamma [SODA 2014] and Mertikopoulos et al. [SODA 2018].}\n}", "pdf": "http://proceedings.mlr.press/v139/cheung21a/cheung21a.pdf", "supp": "", "pdf_size": 571561, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3826280318080210598&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 3, "aff": "Royal Holloway University of London, Egham, United Kingdom; Singapore University of Technology and Design, Singapore", "aff_domain": "rhul.ac.uk;sutd.edu.sg", "email": "rhul.ac.uk;sutd.edu.sg", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/cheung21a.html", "aff_unique_index": "0;1", "aff_unique_norm": "Royal Holloway University of London;Singapore University of Technology and Design", "aff_unique_dep": ";", "aff_unique_url": "https://www.royalholloway.ac.uk;https://www.sutd.edu.sg", "aff_unique_abbr": "RHUL;SUTD", "aff_campus_unique_index": "0", "aff_campus_unique": "Egham;", "aff_country_unique_index": "0;1", "aff_country_unique": "United Kingdom;Singapore" }, { "title": "Online Policy Gradient for Model Free Learning of Linear Quadratic Regulators with $\\sqrt$T Regret", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10103", "id": "10103", "proceeding": "http://proceedings.mlr.press/v139/cassel21a.html", "slides": "", "author_site": "Asaf Cassel, Tomer Koren", "author": "Asaf B Cassel; Tomer Koren", "abstract": "We consider the task of learning to control a linear dynamical system under fixed quadratic costs, known as the Linear Quadratic Regulator (LQR) problem. While model-free approaches are often favorable in practice, thus far only model-based methods, which rely on costly system identification, have been shown to achieve regret that scales with the optimal dependence on the time horizon T. We present the first model-free algorithm that achieves similar regret guarantees. Our method relies on an efficient policy gradient scheme, and a novel and tighter analysis of the cost of exploration in policy space in this setting.", "bibtex": "@InProceedings{pmlr-v139-cassel21a,\n title = \t {Online Policy Gradient for Model Free Learning of Linear Quadratic Regulators with $\\sqrt{}$T Regret},\n author = {Cassel, Asaf B and Koren, Tomer},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1304--1313},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cassel21a/cassel21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/cassel21a.html},\n abstract = \t {We consider the task of learning to control a linear dynamical system under fixed quadratic costs, known as the Linear Quadratic Regulator (LQR) problem. 
While model-free approaches are often favorable in practice, thus far only model-based methods, which rely on costly system identification, have been shown to achieve regret that scales with the optimal dependence on the time horizon T. We present the first model-free algorithm that achieves similar regret guarantees. Our method relies on an efficient policy gradient scheme, and a novel and tighter analysis of the cost of exploration in policy space in this setting.}\n}", "pdf": "http://proceedings.mlr.press/v139/cassel21a/cassel21a.pdf", "supp": "", "pdf_size": 296081, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11813325476129742837&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Blavatnik School of Computer Science, Tel Aviv University + Google Research, Tel Aviv; Blavatnik School of Computer Science, Tel Aviv University + Google Research, Tel Aviv", "aff_domain": "mail.tau.ac.il;tauex.tau.ac.il", "email": "mail.tau.ac.il;tauex.tau.ac.il", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/cassel21a.html", "aff_unique_index": "0+1;0+1", "aff_unique_norm": "Tel Aviv University;Google", "aff_unique_dep": "Blavatnik School of Computer Science;Google Research", "aff_unique_url": "https://www.tau.ac.il;https://research.google", "aff_unique_abbr": "TAU;Google", "aff_campus_unique_index": "0+0;0+0", "aff_campus_unique": "Tel Aviv", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "Israel" }, { "title": "Online Selection Problems against Constrained Adversary", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9829", "id": "9829", "proceeding": "http://proceedings.mlr.press/v139/jiang21h.html", "slides": "", "author_site": "Zhihao Jiang, Pinyan Lu, Zhihao Gavin Tang, Yuhao Zhang", "author": "Zhihao Jiang; Pinyan Lu; Zhihao Gavin Tang; Yuhao Zhang", "abstract": "Inspired by a recent line of work in online algorithms with predictions, we study the constrained adversary model that utilizes predictions from a different perspective. Prior works mostly focused on designing simultaneously robust and consistent algorithms, without making assumptions on the quality of the predictions. In contrary, our model assumes the adversarial instance is consistent with the predictions and aim to design algorithms that have best worst-case performance against all such instances. We revisit classical online selection problems under the constrained adversary model. For the single item selection problem, we design an optimal algorithm in the adversarial arrival model and an improved algorithm in the random arrival model (a.k.a., the secretary problem). 
For the online edge-weighted bipartite matching problem, we extend the classical Water-filling and Ranking algorithms and achieve improved competitive ratios.", "bibtex": "@InProceedings{pmlr-v139-jiang21h,\n title = \t {Online Selection Problems against Constrained Adversary},\n author = {Jiang, Zhihao and Lu, Pinyan and Tang, Zhihao Gavin and Zhang, Yuhao},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5002--5012},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jiang21h/jiang21h.pdf},\n url = \t {https://proceedings.mlr.press/v139/jiang21h.html},\n abstract = \t {Inspired by a recent line of work in online algorithms with predictions, we study the constrained adversary model that utilizes predictions from a different perspective. Prior works mostly focused on designing simultaneously robust and consistent algorithms, without making assumptions on the quality of the predictions. In contrary, our model assumes the adversarial instance is consistent with the predictions and aim to design algorithms that have best worst-case performance against all such instances. We revisit classical online selection problems under the constrained adversary model. For the single item selection problem, we design an optimal algorithm in the adversarial arrival model and an improved algorithm in the random arrival model (a.k.a., the secretary problem). For the online edge-weighted bipartite matching problem, we extend the classical Water-filling and Ranking algorithms and achieve improved competitive ratios.}\n}", "pdf": "http://proceedings.mlr.press/v139/jiang21h/jiang21h.pdf", "supp": "", "pdf_size": 455336, "gs_citation": 19, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11738026395348266900&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Management Science and Engineering, Stanford University, Stanford, California, USA; School of Information Management and Engineering, Shanghai University of Finance and Economics, Shanghai, China; School of Information Management and Engineering, Shanghai University of Finance and Economics, Shanghai, China; John Hopcroft Center for Computer Science, Shanghai Jiao Tong University, Shanghai, China", "aff_domain": "stanford.edu;mail.shufe.edu.cn;mail.shufe.edu.cn;sjtu.edu.cn", "email": "stanford.edu;mail.shufe.edu.cn;mail.shufe.edu.cn;sjtu.edu.cn", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/jiang21h.html", "aff_unique_index": "0;1;1;2", "aff_unique_norm": "Stanford University;Shanghai University of Finance and Economics;Shanghai Jiao Tong University", "aff_unique_dep": "Department of Management Science and Engineering;School of Information Management and Engineering;John Hopcroft Center for Computer Science", "aff_unique_url": "https://www.stanford.edu;http://www.sufe.edu.cn;https://www.sjtu.edu.cn", "aff_unique_abbr": "Stanford;SUFE;SJTU", "aff_campus_unique_index": "0;1;1;1", "aff_campus_unique": "Stanford;Shanghai", "aff_country_unique_index": "0;1;1;1", "aff_country_unique": "United States;China" }, { "title": "Online Submodular Resource Allocation with Applications to Rebalancing Shared Mobility Systems", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9237", "id": "9237", "proceeding": 
"http://proceedings.mlr.press/v139/sessa21a.html", "slides": "", "author_site": "Pier Giuseppe Sessa, Ilija Bogunovic, Andreas Krause, Maryam Kamgarpour", "author": "Pier Giuseppe Sessa; Ilija Bogunovic; Andreas Krause; Maryam Kamgarpour", "abstract": "Motivated by applications in shared mobility, we address the problem of allocating a group of agents to a set of resources to maximize a cumulative welfare objective. We model the welfare obtainable from each resource as a monotone DR-submodular function which is a-priori unknown and can only be learned by observing the welfare of selected allocations. Moreover, these functions can depend on time-varying contextual information. We propose a distributed scheme to maximize the cumulative welfare by designing a repeated game among the agents, who learn to act via regret minimization. We propose two design choices for the game rewards based on upper confidence bounds built around the unknown welfare functions. We analyze them theoretically, bounding the gap between the cumulative welfare of the game and the highest cumulative welfare obtainable in hindsight. Finally, we evaluate our approach in a realistic case study of rebalancing a shared mobility system (i.e., positioning vehicles in strategic areas). From observed trip data, our algorithm gradually learns the users\u2019 demand pattern and improves the overall system operation.", "bibtex": "@InProceedings{pmlr-v139-sessa21a,\n title = \t {Online Submodular Resource Allocation with Applications to Rebalancing Shared Mobility Systems},\n author = {Sessa, Pier Giuseppe and Bogunovic, Ilija and Krause, Andreas and Kamgarpour, Maryam},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9455--9464},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sessa21a/sessa21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/sessa21a.html},\n abstract = \t {Motivated by applications in shared mobility, we address the problem of allocating a group of agents to a set of resources to maximize a cumulative welfare objective. We model the welfare obtainable from each resource as a monotone DR-submodular function which is a-priori unknown and can only be learned by observing the welfare of selected allocations. Moreover, these functions can depend on time-varying contextual information. We propose a distributed scheme to maximize the cumulative welfare by designing a repeated game among the agents, who learn to act via regret minimization. We propose two design choices for the game rewards based on upper confidence bounds built around the unknown welfare functions. We analyze them theoretically, bounding the gap between the cumulative welfare of the game and the highest cumulative welfare obtainable in hindsight. Finally, we evaluate our approach in a realistic case study of rebalancing a shared mobility system (i.e., positioning vehicles in strategic areas). 
From observed trip data, our algorithm gradually learns the users\u2019 demand pattern and improves the overall system operation.}\n}", "pdf": "http://proceedings.mlr.press/v139/sessa21a/sessa21a.pdf", "supp": "", "pdf_size": 2664805, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13800186360235896336&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "ETH Zurich, Switzerland; ETH Zurich, Switzerland; ETH Zurich, Switzerland; ETH Zurich, Switzerland", "aff_domain": "ethz.ch; ; ; ", "email": "ethz.ch; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/sessa21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETHZ", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Switzerland" }, { "title": "Online Unrelated Machine Load Balancing with Predictions Revisited", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10465", "id": "10465", "proceeding": "http://proceedings.mlr.press/v139/li21w.html", "slides": "/media/icml-2021/Slides/10465_e72Exl8.pdf", "author_site": "Shi Li, Jiayi Xian", "author": "Shi Li; Jiayi Xian", "abstract": "We study the online load balancing problem with machine learned predictions, and give results that improve upon and extend those in a recent paper by Lattanzi et al. (2020). First, we design deterministic and randomized online rounding algorithms for the problem in the unrelated machine setting, with $O(\\frac{\\log m}{\\log \\log m})$- and $O(\\frac{\\log \\log m}{\\log \\log \\log m})$-competitive ratios. They respectively improve upon the previous ratios of $O(\\log m)$ and $O(\\log^3\\log m)$, and match the lower bounds given by Lattanzi et al. Second, we extend their prediction scheme from the identical machine restricted assignment setting to the unrelated machine setting. With the knowledge of two vectors over machines, a dual vector and a weight vector, we can construct a good fractional assignment online, that can be passed to an online rounding algorithm. Finally, we consider the learning model introduced by Lavastida et al. (2020), and show that under the model, the two vectors can be learned efficiently with a few samples of instances.", "bibtex": "@InProceedings{pmlr-v139-li21w,\n title = \t {Online Unrelated Machine Load Balancing with Predictions Revisited},\n author = {Li, Shi and Xian, Jiayi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6523--6532},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21w/li21w.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21w.html},\n abstract = \t {We study the online load balancing problem with machine learned predictions, and give results that improve upon and extend those in a recent paper by Lattanzi et al. (2020). First, we design deterministic and randomized online rounding algorithms for the problem in the unrelated machine setting, with $O(\\frac{\\log m}{\\log \\log m})$- and $O(\\frac{\\log \\log m}{\\log \\log \\log m})$-competitive ratios. 
They respectively improve upon the previous ratios of $O(\\log m)$ and $O(\\log^3\\log m)$, and match the lower bounds given by Lattanzi et al. Second, we extend their prediction scheme from the identical machine restricted assignment setting to the unrelated machine setting. With the knowledge of two vectors over machines, a dual vector and a weight vector, we can construct a good fractional assignment online, that can be passed to an online rounding algorithm. Finally, we consider the learning model introduced by Lavastida et al. (2020), and show that under the model, the two vectors can be learned efficiently with a few samples of instances.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21w/li21w.pdf", "supp": "", "pdf_size": 333364, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5225485534405630130&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science and Engineering, University at Buffalo, Buffalo, NY, USA; Department of Computer Science and Engineering, University at Buffalo, Buffalo, NY, USA", "aff_domain": "buffalo.edu;buffalo.edu", "email": "buffalo.edu;buffalo.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/li21w.html", "aff_unique_index": "0;0", "aff_unique_norm": "University at Buffalo", "aff_unique_dep": "Department of Computer Science and Engineering", "aff_unique_url": "https://www.buffalo.edu", "aff_unique_abbr": "UB", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Buffalo", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Oops I Took A Gradient: Scalable Sampling for Discrete Distributions", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9335", "id": "9335", "proceeding": "http://proceedings.mlr.press/v139/grathwohl21a.html", "slides": "/media/icml-2021/Slides/9335.pdf", "author_site": "Will Grathwohl, Kevin Swersky, Milad Hashemi, David Duvenaud, Chris Maddison", "author": "Will Grathwohl; Kevin Swersky; Milad Hashemi; David Duvenaud; Chris Maddison", "abstract": "We propose a general and scalable approximate sampling strategy for probabilistic models with discrete variables. Our approach uses gradients of the likelihood function with respect to its discrete inputs to propose updates in a Metropolis-Hastings sampler. We show empirically that this approach outperforms generic samplers in a number of difficult settings including Ising models, Potts models, restricted Boltzmann machines, and factorial hidden Markov models. We also demonstrate our improved sampler for training deep energy-based models on high dimensional discrete image data. This approach outperforms variational auto-encoders and existing energy-based models. 
Finally, we give bounds showing that our approach is near-optimal in the class of samplers which propose local updates.", "bibtex": "@InProceedings{pmlr-v139-grathwohl21a,\n title = \t {Oops I Took A Gradient: Scalable Sampling for Discrete Distributions},\n author = {Grathwohl, Will and Swersky, Kevin and Hashemi, Milad and Duvenaud, David and Maddison, Chris},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3831--3841},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/grathwohl21a/grathwohl21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/grathwohl21a.html},\n abstract = \t {We propose a general and scalable approximate sampling strategy for probabilistic models with discrete variables. Our approach uses gradients of the likelihood function with respect to its discrete inputs to propose updates in a Metropolis-Hastings sampler. We show empirically that this approach outperforms generic samplers in a number of difficult settings including Ising models, Potts models, restricted Boltzmann machines, and factorial hidden Markov models. We also demonstrate our improved sampler for training deep energy-based models on high dimensional discrete image data. This approach outperforms variational auto-encoders and existing energy-based models. Finally, we give bounds showing that our approach is near-optimal in the class of samplers which propose local updates.}\n}", "pdf": "http://proceedings.mlr.press/v139/grathwohl21a/grathwohl21a.pdf", "supp": "", "pdf_size": 4360997, "gs_citation": 119, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6540555600529946476&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "University of Toronto and Vector Institute+Google Research, Brain Team; Google Research, Brain Team; Google Research, Brain Team; University of Toronto and Vector Institute+Google Research, Brain Team; University of Toronto and Vector Institute", "aff_domain": "cs.toronto.edu; ; ; ; ", "email": "cs.toronto.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/grathwohl21a.html", "aff_unique_index": "0+1;1;1;0+1;0", "aff_unique_norm": "University of Toronto;Google", "aff_unique_dep": ";Google Research", "aff_unique_url": "https://www.utoronto.ca;https://research.google", "aff_unique_abbr": "U of T;Google", "aff_campus_unique_index": "0+1;1;1;0+1;0", "aff_campus_unique": "Toronto;Mountain View", "aff_country_unique_index": "0+1;1;1;0+1;0", "aff_country_unique": "Canada;United States" }, { "title": "Opening the Blackbox: Accelerating Neural Differential Equations by Regularizing Internal Solver Heuristics", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10345", "id": "10345", "proceeding": "http://proceedings.mlr.press/v139/pal21a.html", "slides": "", "author_site": "Avik Pal, Yingbo Ma, Viral Shah, Christopher Rackauckas", "author": "Avik Pal; Yingbo Ma; Viral Shah; Christopher V Rackauckas", "abstract": "Democratization of machine learning requires architectures that automatically adapt to new problems. Neural Differential Equations (NDEs) have emerged as a popular modeling framework by removing the need for ML practitioners to choose the number of layers in a recurrent model. 
While we can control the computational cost by choosing the number of layers in standard architectures, in NDEs the number of neural network evaluations for a forward pass can depend on the number of steps of the adaptive ODE solver. But, can we force the NDE to learn the version with the least steps while not increasing the training cost? Current strategies to overcome slow prediction require high order automatic differentiation, leading to significantly higher training time. We describe a novel regularization method that uses the internal cost heuristics of adaptive differential equation solvers combined with discrete adjoint sensitivities to guide the training process towards learning NDEs that are easier to solve. This approach opens up the blackbox numerical analysis behind the differential equation solver\u2019s algorithm and directly uses its local error estimates and stiffness heuristics as cheap and accurate cost estimates. We incorporate our method without any change in the underlying NDE framework and show that our method extends beyond Ordinary Differential Equations to accommodate Neural Stochastic Differential Equations. We demonstrate how our approach can halve the prediction time and, unlike other methods which can increase the training time by an order of magnitude, we demonstrate similar reduction in training times. Together this showcases how the knowledge embedded within state-of-the-art equation solvers can be used to enhance machine learning.", "bibtex": "@InProceedings{pmlr-v139-pal21a,\n title = \t {Opening the Blackbox: Accelerating Neural Differential Equations by Regularizing Internal Solver Heuristics},\n author = {Pal, Avik and Ma, Yingbo and Shah, Viral and Rackauckas, Christopher V},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8325--8335},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/pal21a/pal21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/pal21a.html},\n abstract = \t {Democratization of machine learning requires architectures that automatically adapt to new problems. Neural Differential Equations (NDEs) have emerged as a popular modeling framework by removing the need for ML practitioners to choose the number of layers in a recurrent model. While we can control the computational cost by choosing the number of layers in standard architectures, in NDEs the number of neural network evaluations for a forward pass can depend on the number of steps of the adaptive ODE solver. But, can we force the NDE to learn the version with the least steps while not increasing the training cost? Current strategies to overcome slow prediction require high order automatic differentiation, leading to significantly higher training time. We describe a novel regularization method that uses the internal cost heuristics of adaptive differential equation solvers combined with discrete adjoint sensitivities to guide the training process towards learning NDEs that are easier to solve. This approach opens up the blackbox numerical analysis behind the differential equation solver\u2019s algorithm and directly uses its local error estimates and stiffness heuristics as cheap and accurate cost estimates. 
We incorporate our method without any change in the underlying NDE framework and show that our method extends beyond Ordinary Differential Equations to accommodate Neural Stochastic Differential Equations. We demonstrate how our approach can halve the prediction time and, unlike other methods which can increase the training time by an order of magnitude, we demonstrate similar reduction in training times. Together this showcases how the knowledge embedded within state-of-the-art equation solvers can be used to enhance machine learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/pal21a/pal21a.pdf", "supp": "", "pdf_size": 532874, "gs_citation": 46, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18086323631654265504&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Indian Institute of Technology Kanpur+Julia Computing; Julia Computing; Julia Computing; Massachusetts Institute of Technology+Pumas AI+University of Maryland Baltimore", "aff_domain": "cse.iitk.ac.in; ; ;mit.edu", "email": "cse.iitk.ac.in; ; ;mit.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/pal21a.html", "aff_unique_index": "0+1;1;1;2+3+4", "aff_unique_norm": "Indian Institute of Technology Kanpur;Julia Computing;Massachusetts Institute of Technology;Pumas AI;University of Maryland, Baltimore", "aff_unique_dep": ";;;;", "aff_unique_url": "https://www.iitk.ac.in;https://juliacomputing.com;https://web.mit.edu;;https://www.umaryland.edu", "aff_unique_abbr": "IIT Kanpur;Julia Computing;MIT;;UMB", "aff_campus_unique_index": "0;2", "aff_campus_unique": "Kanpur;;Baltimore", "aff_country_unique_index": "0+1;1;1;1+1", "aff_country_unique": "India;United States;" }, { "title": "Operationalizing Complex Causes: A Pragmatic View of Mediation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9915", "id": "9915", "proceeding": "http://proceedings.mlr.press/v139/gultchin21a.html", "slides": "/media/icml-2021/Slides/9915.pdf", "author_site": "Limor Gultchin, David Watson, Matt J. Kusner, Ricardo Silva", "author": "Limor Gultchin; David Watson; Matt Kusner; Ricardo Silva", "abstract": "We examine the problem of causal response estimation for complex objects (e.g., text, images, genomics). In this setting, classical \\emph{atomic} interventions are often not available (e.g., changes to characters, pixels, DNA base-pairs). Instead, we only have access to indirect or \\emph{crude} interventions (e.g., enrolling in a writing program, modifying a scene, applying a gene therapy). In this work, we formalize this problem and provide an initial solution. Given a collection of candidate mediators, we propose (a) a two-step method for predicting the causal responses of crude interventions; and (b) a testing procedure to identify mediators of crude interventions. 
We demonstrate, on a range of simulated and real-world-inspired examples, that our approach allows us to efficiently estimate the effect of crude interventions with limited data from new treatment regimes.", "bibtex": "@InProceedings{pmlr-v139-gultchin21a,\n title = \t {Operationalizing Complex Causes: A Pragmatic View of Mediation},\n author = {Gultchin, Limor and Watson, David and Kusner, Matt and Silva, Ricardo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3875--3885},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gultchin21a/gultchin21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/gultchin21a.html},\n abstract = \t {We examine the problem of causal response estimation for complex objects (e.g., text, images, genomics). In this setting, classical \\emph{atomic} interventions are often not available (e.g., changes to characters, pixels, DNA base-pairs). Instead, we only have access to indirect or \\emph{crude} interventions (e.g., enrolling in a writing program, modifying a scene, applying a gene therapy). In this work, we formalize this problem and provide an initial solution. Given a collection of candidate mediators, we propose (a) a two-step method for predicting the causal responses of crude interventions; and (b) a testing procedure to identify mediators of crude interventions. We demonstrate, on a range of simulated and real-world-inspired examples, that our approach allows us to efficiently estimate the effect of crude interventions with limited data from new treatment regimes.}\n}", "pdf": "http://proceedings.mlr.press/v139/gultchin21a/gultchin21a.pdf", "supp": "", "pdf_size": 1307439, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15565452123708375262&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Department of Computer Science, University of Oxford, Oxford, UK+The Alan Turing Institute, London, UK; Department of Statistical Science, University of College London, London, UK; Department of Computer Science, University of College London, London, UK; Department of Statistical Science, University of College London, London, UK", "aff_domain": "gmail.com; ; ; ", "email": "gmail.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/gultchin21a.html", "aff_unique_index": "0+1;2;2;2", "aff_unique_norm": "University of Oxford;Alan Turing Institute;University College London", "aff_unique_dep": "Department of Computer Science;;Department of Statistical Science", "aff_unique_url": "https://www.ox.ac.uk;https://www.turing.ac.uk;https://www.ucl.ac.uk", "aff_unique_abbr": "Oxford;ATI;UCL", "aff_campus_unique_index": "0+1;1;1;1", "aff_campus_unique": "Oxford;London", "aff_country_unique_index": "0+0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "OptiDICE: Offline Policy Optimization via Stationary Distribution Correction Estimation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10187", "id": "10187", "proceeding": "http://proceedings.mlr.press/v139/lee21f.html", "slides": "/media/icml-2021/Slides/10187.pdf", "author_site": "Jongmin Lee, Wonseok Jeon, Byung-Jun Lee, Joelle Pineau, Kee-Eung Kim", "author": "Jongmin Lee; Wonseok Jeon; Byungjun Lee; Joelle Pineau; Kee-Eung Kim", "abstract": 
"We consider the offline reinforcement learning (RL) setting where the agent aims to optimize the policy solely from the data without further environment interactions. In offline RL, the distributional shift becomes the primary source of difficulty, which arises from the deviation of the target policy being optimized from the behavior policy used for data collection. This typically causes overestimation of action values, which poses severe problems for model-free algorithms that use bootstrapping. To mitigate the problem, prior offline RL algorithms often used sophisticated techniques that encourage underestimation of action values, which introduces an additional set of hyperparameters that need to be tuned properly. In this paper, we present an offline RL algorithm that prevents overestimation in a more principled way. Our algorithm, OptiDICE, directly estimates the stationary distribution corrections of the optimal policy and does not rely on policy-gradients, unlike previous offline RL algorithms. Using an extensive set of benchmark datasets for offline RL, we show that OptiDICE performs competitively with the state-of-the-art methods.", "bibtex": "@InProceedings{pmlr-v139-lee21f,\n title = \t {OptiDICE: Offline Policy Optimization via Stationary Distribution Correction Estimation},\n author = {Lee, Jongmin and Jeon, Wonseok and Lee, Byungjun and Pineau, Joelle and Kim, Kee-Eung},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6120--6130},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lee21f/lee21f.pdf},\n url = \t {https://proceedings.mlr.press/v139/lee21f.html},\n abstract = \t {We consider the offline reinforcement learning (RL) setting where the agent aims to optimize the policy solely from the data without further environment interactions. In offline RL, the distributional shift becomes the primary source of difficulty, which arises from the deviation of the target policy being optimized from the behavior policy used for data collection. This typically causes overestimation of action values, which poses severe problems for model-free algorithms that use bootstrapping. To mitigate the problem, prior offline RL algorithms often used sophisticated techniques that encourage underestimation of action values, which introduces an additional set of hyperparameters that need to be tuned properly. In this paper, we present an offline RL algorithm that prevents overestimation in a more principled way. Our algorithm, OptiDICE, directly estimates the stationary distribution corrections of the optimal policy and does not rely on policy-gradients, unlike previous offline RL algorithms. 
Using an extensive set of benchmark datasets for offline RL, we show that OptiDICE performs competitively with the state-of-the-art methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/lee21f/lee21f.pdf", "supp": "", "pdf_size": 1018414, "gs_citation": 129, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17931384471662310085&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "School of Computing, KAIST; Mila, Quebec AI Institute + School of Computer Science, McGill University; Gauss Labs Inc.; Mila, Quebec AI Institute + School of Computer Science, McGill University + Facebook AI Research; School of Computing, KAIST + Graduate School of AI, KAIST", "aff_domain": "ai.kaist.ac.kr;mila.quebec; ; ; ", "email": "ai.kaist.ac.kr;mila.quebec; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/lee21f.html", "aff_unique_index": "0;1+2;3;1+2+4;0+0", "aff_unique_norm": "KAIST;Quebec AI Institute;McGill University;Gauss Labs Inc.;Meta", "aff_unique_dep": "School of Computing;AI Institute;School of Computer Science;;Facebook AI Research", "aff_unique_url": "https://www.kaist.ac.kr;https://www.mila.quebec;https://www.mcgill.ca;;https://research.facebook.com", "aff_unique_abbr": "KAIST;Mila;McGill;;FAIR", "aff_campus_unique_index": "1+2;1+2;", "aff_campus_unique": ";Quebec;Montreal", "aff_country_unique_index": "0;1+1;2;1+1+2;0+0", "aff_country_unique": "South Korea;Canada;United States" }, { "title": "Optimal Complexity in Decentralized Training", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8893", "id": "8893", "proceeding": "http://proceedings.mlr.press/v139/lu21a.html", "slides": "", "author_site": "Yucheng Lu, Christopher De Sa", "author": "Yucheng Lu; Christopher De Sa", "abstract": "Decentralization is a promising method of scaling up parallel machine learning systems. In this paper, we provide a tight lower bound on the iteration complexity for such methods in a stochastic non-convex setting. Our lower bound reveals a theoretical gap in known convergence rates of many existing decentralized training algorithms, such as D-PSGD. We prove by construction this lower bound is tight and achievable. Motivated by our insights, we further propose DeTAG, a practical gossip-style decentralized algorithm that achieves the lower bound with only a logarithm gap. Empirically, we compare DeTAG with other decentralized algorithms on image classification tasks, and we show DeTAG enjoys faster convergence compared to baselines, especially on unshuffled data and in sparse networks.", "bibtex": "@InProceedings{pmlr-v139-lu21a,\n title = \t {Optimal Complexity in Decentralized Training},\n author = {Lu, Yucheng and De Sa, Christopher},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7111--7123},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lu21a/lu21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lu21a.html},\n abstract = \t {Decentralization is a promising method of scaling up parallel machine learning systems. In this paper, we provide a tight lower bound on the iteration complexity for such methods in a stochastic non-convex setting. 
Our lower bound reveals a theoretical gap in known convergence rates of many existing decentralized training algorithms, such as D-PSGD. We prove by construction this lower bound is tight and achievable. Motivated by our insights, we further propose DeTAG, a practical gossip-style decentralized algorithm that achieves the lower bound with only a logarithm gap. Empirically, we compare DeTAG with other decentralized algorithms on image classification tasks, and we show DeTAG enjoys faster convergence compared to baselines, especially on unshuffled data and in sparse networks.}\n}", "pdf": "http://proceedings.mlr.press/v139/lu21a/lu21a.pdf", "supp": "", "pdf_size": 1126125, "gs_citation": 92, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7676607792298668700&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, Cornell University, Ithaca, New York, United States; Department of Computer Science, Cornell University, Ithaca, New York, United States", "aff_domain": "cornell.edu; ", "email": "cornell.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/lu21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Cornell University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.cornell.edu", "aff_unique_abbr": "Cornell", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Ithaca", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Optimal Counterfactual Explanations in Tree Ensembles", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9761", "id": "9761", "proceeding": "http://proceedings.mlr.press/v139/parmentier21a.html", "slides": "/media/icml-2021/Slides/9761.pdf", "author_site": "Axel Parmentier, Thibaut Vidal", "author": "Axel Parmentier; Thibaut Vidal", "abstract": "Counterfactual explanations are usually generated through heuristics that are sensitive to the search\u2019s initial conditions. The absence of guarantees of performance and robustness hinders trustworthiness. In this paper, we take a disciplined approach towards counterfactual explanations for tree ensembles. We advocate for a model-based search aiming at \"optimal\" explanations and propose efficient mixed-integer programming approaches. We show that isolation forests can be modeled within our framework to focus the search on plausible explanations with a low outlier score. We provide comprehensive coverage of additional constraints that model important objectives, heterogeneous data types, structural constraints on the feature space, along with resource and actionability restrictions. Our experimental analyses demonstrate that the proposed search approach requires a computational effort that is orders of magnitude smaller than previous mathematical programming algorithms. 
It scales up to large data sets and tree ensembles, where it provides, within seconds, systematic explanations grounded on well-defined models solved to optimality.", "bibtex": "@InProceedings{pmlr-v139-parmentier21a,\n title = \t {Optimal Counterfactual Explanations in Tree Ensembles},\n author = {Parmentier, Axel and Vidal, Thibaut},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8422--8431},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/parmentier21a/parmentier21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/parmentier21a.html},\n abstract = \t {Counterfactual explanations are usually generated through heuristics that are sensitive to the search\u2019s initial conditions. The absence of guarantees of performance and robustness hinders trustworthiness. In this paper, we take a disciplined approach towards counterfactual explanations for tree ensembles. We advocate for a model-based search aiming at \"optimal\" explanations and propose efficient mixed-integer programming approaches. We show that isolation forests can be modeled within our framework to focus the search on plausible explanations with a low outlier score. We provide comprehensive coverage of additional constraints that model important objectives, heterogeneous data types, structural constraints on the feature space, along with resource and actionability restrictions. Our experimental analyses demonstrate that the proposed search approach requires a computational effort that is orders of magnitude smaller than previous mathematical programming algorithms. 
It scales up to large data sets and tree ensembles, where it provides, within seconds, systematic explanations grounded on well-defined models solved to optimality.}\n}", "pdf": "http://proceedings.mlr.press/v139/parmentier21a/parmentier21a.pdf", "supp": "", "pdf_size": 694083, "gs_citation": 80, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1410339152566950271&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "CERMICS, \u00c9cole des Ponts Paristech; CIRRELT & SCALE-AI Chair in Data-Driven Supply Chains, Department of Mathematics and Industrial Engineering, Polytechnique Montreal, Canada + Department of Computer Science, Pontifical Catholic University of Rio de Janeiro (PUC-Rio), Brazil", "aff_domain": "enpc.fr;cirrelt.ca", "email": "enpc.fr;cirrelt.ca", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/parmentier21a.html", "aff_unique_index": "0;1+2", "aff_unique_norm": "\u00c9cole des Ponts ParisTech;Polytechnique Montreal;Pontifical Catholic University of Rio de Janeiro", "aff_unique_dep": "CERMICS;Department of Mathematics and Industrial Engineering;Department of Computer Science", "aff_unique_url": "https://www.ponts.fr;https://www.polymtl.ca;https://www.puc-rio.br", "aff_unique_abbr": ";Polytechnique;PUC-Rio", "aff_campus_unique_index": "1", "aff_campus_unique": ";Rio de Janeiro", "aff_country_unique_index": "0;1+2", "aff_country_unique": "France;Canada;Brazil" }, { "title": "Optimal Estimation of High Dimensional Smooth Additive Function Based on Noisy Observations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9535", "id": "9535", "proceeding": "http://proceedings.mlr.press/v139/zhou21c.html", "slides": "", "author_site": "Fan Zhou, Ping Li", "author": "Fan Zhou; Ping Li", "abstract": "Given $\\bx_j = \\btheta + \\bepsilon_j$, $j=1,...,n$ where $\\btheta \\in \\RR^d$ is an unknown parameter and $\\bepsilon_j$ are i.i.d. Gaussian noise vectors, we study the estimation of $f(\\btheta)$ for a given smooth function $f:\\RR^d \\rightarrow \\RR$ equipped with an additive structure. We inherit the idea from a recent work which introduced an effective bias reduction technique through iterative bootstrap and derive a bias-reducing estimator. By establishing its normal approximation results, we show that the proposed estimator can achieve asymptotic normality with a looser constraint on smoothness compared with general smooth function due to the additive structure. Such results further imply that the proposed estimator is asymptotically efficient. Both upper and lower bounds on mean squared error are proved which shows the proposed estimator is minimax optimal for the smooth class considered. 
Numerical simulation results are presented to validate our analysis and show its superior performance of the proposed estimator over the plug-in approach in terms of bias reduction and building confidence\u00a0intervals.", "bibtex": "@InProceedings{pmlr-v139-zhou21c,\n title = \t {Optimal Estimation of High Dimensional Smooth Additive Function Based on Noisy Observations},\n author = {Zhou, Fan and Li, Ping},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12813--12823},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhou21c/zhou21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhou21c.html},\n abstract = \t {Given $\\bx_j = \\btheta + \\bepsilon_j$, $j=1,...,n$ where $\\btheta \\in \\RR^d$ is an unknown parameter and $\\bepsilon_j$ are i.i.d. Gaussian noise vectors, we study the estimation of $f(\\btheta)$ for a given smooth function $f:\\RR^d \\rightarrow \\RR$ equipped with an additive structure. We inherit the idea from a recent work which introduced an effective bias reduction technique through iterative bootstrap and derive a bias-reducing estimator. By establishing its normal approximation results, we show that the proposed estimator can achieve asymptotic normality with a looser constraint on smoothness compared with general smooth function due to the additive structure. Such results further imply that the proposed estimator is asymptotically efficient. Both upper and lower bounds on mean squared error are proved which shows the proposed estimator is minimax optimal for the smooth class considered. Numerical simulation results are presented to validate our analysis and show its superior performance of the proposed estimator over the plug-in approach in terms of bias reduction and building confidence\u00a0intervals.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhou21c/zhou21c.pdf", "supp": "", "pdf_size": 1118549, "gs_citation": 1, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16630432265303192241&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": ";", "aff_domain": ";", "email": ";", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/zhou21c.html" }, { "title": "Optimal Non-Convex Exact Recovery in Stochastic Block Model via Projected Power Method", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10669", "id": "10669", "proceeding": "http://proceedings.mlr.press/v139/wang21o.html", "slides": "/media/icml-2021/Slides/10669.pdf", "author_site": "Peng Wang, Huikang Liu, Zirui Zhou, Anthony Man-Cho So", "author": "Peng Wang; Huikang Liu; Zirui Zhou; Anthony Man-Cho So", "abstract": "In this paper, we study the problem of exact community recovery in the symmetric stochastic block model, where a graph of $n$ vertices is randomly generated by partitioning the vertices into $K \\ge 2$ equal-sized communities and then connecting each pair of vertices with probability that depends on their community memberships. Although the maximum-likelihood formulation of this problem is discrete and non-convex, we propose to tackle it directly using projected power iterations with an initialization that satisfies a partial recovery condition. Such an initialization can be obtained by a host of existing methods. 
We show that in the logarithmic degree regime of the considered problem, the proposed method can exactly recover the underlying communities at the information-theoretic limit. Moreover, with a qualified initialization, it runs in $\\mO(n\\log^2n/\\log\\log n)$ time, which is competitive with existing state-of-the-art methods. We also present numerical results of the proposed method to support and complement our theoretical development.", "bibtex": "@InProceedings{pmlr-v139-wang21o,\n title = \t {Optimal Non-Convex Exact Recovery in Stochastic Block Model via Projected Power Method},\n author = {Wang, Peng and Liu, Huikang and Zhou, Zirui and So, Anthony Man-Cho},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10828--10838},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21o/wang21o.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21o.html},\n abstract = \t {In this paper, we study the problem of exact community recovery in the symmetric stochastic block model, where a graph of $n$ vertices is randomly generated by partitioning the vertices into $K \\ge 2$ equal-sized communities and then connecting each pair of vertices with probability that depends on their community memberships. Although the maximum-likelihood formulation of this problem is discrete and non-convex, we propose to tackle it directly using projected power iterations with an initialization that satisfies a partial recovery condition. Such an initialization can be obtained by a host of existing methods. We show that in the logarithmic degree regime of the considered problem, the proposed method can exactly recover the underlying communities at the information-theoretic limit. Moreover, with a qualified initialization, it runs in $\\mO(n\\log^2n/\\log\\log n)$ time, which is competitive with existing state-of-the-art methods. 
We also present numerical results of the proposed method to support and complement our theoretical development.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21o/wang21o.pdf", "supp": "", "pdf_size": 531815, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2598400261123150872&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Systems Engineering and Engineering Management, The Chinese University of Hong Kong, Hong Kong; Business School, Imperial College London, London, United Kingdom; Huawei Technologies Canada Co., Ltd., Burnaby, Canada; Department of Systems Engineering and Engineering Management, The Chinese University of Hong Kong, Hong Kong", "aff_domain": "gmail.com; ; ; ", "email": "gmail.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/wang21o.html", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "Chinese University of Hong Kong;Imperial College London;Huawei", "aff_unique_dep": "Department of Systems Engineering and Engineering Management;Business School;Huawei Technologies", "aff_unique_url": "https://www.cuhk.edu.hk;https://www.imperial.ac.uk;https://www.huawei.com/ca-en/", "aff_unique_abbr": "CUHK;Imperial College;Huawei", "aff_campus_unique_index": "0;1;2;0", "aff_campus_unique": "Hong Kong SAR;London;Burnaby", "aff_country_unique_index": "0;1;2;0", "aff_country_unique": "China;United Kingdom;Canada" }, { "title": "Optimal Off-Policy Evaluation from Multiple Logging Policies", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10615", "id": "10615", "proceeding": "http://proceedings.mlr.press/v139/kallus21a.html", "slides": "/media/icml-2021/Slides/10615.pdf", "author_site": "Nathan Kallus, Yuta Saito, Masatoshi Uehara", "author": "Nathan Kallus; Yuta Saito; Masatoshi Uehara", "abstract": "We study off-policy evaluation (OPE) from multiple logging policies, each generating a dataset of fixed size, i.e., stratified sampling. Previous work noted that in this setting the ordering of the variances of different importance sampling estimators is instance-dependent, which brings up a dilemma as to which importance sampling weights to use. In this paper, we resolve this dilemma by finding the OPE estimator for multiple loggers with minimum variance for any instance, i.e., the efficient one. In particular, we establish the efficiency bound under stratified sampling and propose an estimator achieving this bound when given consistent $q$-estimates. To guard against misspecification of $q$-functions, we also provide a way to choose the control variate in a hypothesis class to minimize variance. 
Extensive experiments demonstrate the benefits of our methods\u2019 efficiently leveraging of the stratified sampling of off-policy data from multiple loggers.", "bibtex": "@InProceedings{pmlr-v139-kallus21a,\n title = \t {Optimal Off-Policy Evaluation from Multiple Logging Policies},\n author = {Kallus, Nathan and Saito, Yuta and Uehara, Masatoshi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5247--5256},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kallus21a/kallus21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kallus21a.html},\n abstract = \t {We study off-policy evaluation (OPE) from multiple logging policies, each generating a dataset of fixed size, i.e., stratified sampling. Previous work noted that in this setting the ordering of the variances of different importance sampling estimators is instance-dependent, which brings up a dilemma as to which importance sampling weights to use. In this paper, we resolve this dilemma by finding the OPE estimator for multiple loggers with minimum variance for any instance, i.e., the efficient one. In particular, we establish the efficiency bound under stratified sampling and propose an estimator achieving this bound when given consistent $q$-estimates. To guard against misspecification of $q$-functions, we also provide a way to choose the control variate in a hypothesis class to minimize variance. Extensive experiments demonstrate the benefits of our methods\u2019 efficiently leveraging of the stratified sampling of off-policy data from multiple loggers.}\n}", "pdf": "http://proceedings.mlr.press/v139/kallus21a/kallus21a.pdf", "supp": "", "pdf_size": 444727, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17334297684979627817&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Cornell University, NY, USA; Cornell University, NY, USA; Cornell University, NY, USA", "aff_domain": "cornell.edu; ; ", "email": "cornell.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/kallus21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Cornell University", "aff_unique_dep": "", "aff_unique_url": "https://www.cornell.edu", "aff_unique_abbr": "Cornell", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Ithaca", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Optimal Streaming Algorithms for Multi-Armed Bandits", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8467", "id": "8467", "proceeding": "http://proceedings.mlr.press/v139/jin21a.html", "slides": "", "author_site": "Tianyuan Jin, Keke Huang, Jing Tang, Xiaokui Xiao", "author": "Tianyuan Jin; Keke Huang; Jing Tang; Xiaokui Xiao", "abstract": "This paper studies two variants of the best arm identification (BAI) problem under the streaming model, where we have a stream of n arms with reward distributions supported on [0,1] with unknown means. The arms in the stream are arriving one by one, and the algorithm cannot access an arm unless it is stored in a limited size memory. 
We first study the streaming \epsilon-topk-arms identification problem, which asks for k arms whose reward means are lower than that of the k-th best arm by at most \epsilon with probability at least 1-\delta. For general \epsilon \in (0,1), the existing solution for this problem assumes k = 1 and achieves the optimal sample complexity O(\frac{n}{\epsilon^2} \log \frac{1}{\delta}) using O(\log^*(n)) memory and a single pass of the stream. We propose an algorithm that works for any k and achieves the optimal sample complexity O(\frac{n}{\epsilon^2} \log\frac{k}{\delta}) using a single-arm memory and a single pass of the stream. Second, we study the streaming BAI problem, where the objective is to identify the arm with the maximum reward mean with at least 1-\delta probability, using a single-arm memory and as few passes of the input stream as possible. We present a single-arm-memory algorithm that achieves a near instance-dependent optimal sample complexity within O(\log \Delta_2^{-1}) passes, where \Delta_2 is the gap between the mean of the best arm and that of the second best arm.", "bibtex": "@InProceedings{pmlr-v139-jin21a,\n title = \t {Optimal Streaming Algorithms for Multi-Armed Bandits},\n author = {Jin, Tianyuan and Huang, Keke and Tang, Jing and Xiao, Xiaokui},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5045--5054},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jin21a/jin21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jin21a.html},\n abstract = \t {This paper studies two variants of the best arm identification (BAI) problem under the streaming model, where we have a stream of n arms with reward distributions supported on [0,1] with unknown means. The arms in the stream are arriving one by one, and the algorithm cannot access an arm unless it is stored in a limited size memory. We first study the streaming \epsilon-topk-arms identification problem, which asks for k arms whose reward means are lower than that of the k-th best arm by at most \epsilon with probability at least 1-\delta. For general \epsilon \in (0,1), the existing solution for this problem assumes k = 1 and achieves the optimal sample complexity O(\frac{n}{\epsilon^2} \log \frac{1}{\delta}) using O(\log^*(n)) memory and a single pass of the stream. We propose an algorithm that works for any k and achieves the optimal sample complexity O(\frac{n}{\epsilon^2} \log\frac{k}{\delta}) using a single-arm memory and a single pass of the stream. Second, we study the streaming BAI problem, where the objective is to identify the arm with the maximum reward mean with at least 1-\delta probability, using a single-arm memory and as few passes of the input stream as possible. 
We present a single-arm-memory algorithm that achieves a near instance-dependent optimal sample complexity within O(\\log \\Delta_2^{-1}) passes, where \\Delta_2 is the gap between the mean of the best arm and that of the second best arm.}\n}", "pdf": "http://proceedings.mlr.press/v139/jin21a/jin21a.pdf", "supp": "", "pdf_size": 310355, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=60756466645996799&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "School of Computing, National University of Singapore, Singapore; School of Computing, National University of Singapore, Singapore; Data Science and Analytics Thrust, The Hong Kong University of Science and Technology, Guangzhou, China; School of Computing, National University of Singapore, Singapore", "aff_domain": "nus.edu.sg; ; ;nus.edu.sg", "email": "nus.edu.sg; ; ;nus.edu.sg", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/jin21a.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "National University of Singapore;Hong Kong University of Science and Technology", "aff_unique_dep": "School of Computing;Data Science and Analytics Thrust", "aff_unique_url": "https://www.nus.edu.sg;https://www.ust.hk", "aff_unique_abbr": "NUS;HKUST", "aff_campus_unique_index": "1", "aff_campus_unique": ";Guangzhou", "aff_country_unique_index": "0;0;1;0", "aff_country_unique": "Singapore;China" }, { "title": "Optimal Thompson Sampling strategies for support-aware CVaR bandits", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9315", "id": "9315", "proceeding": "http://proceedings.mlr.press/v139/baudry21a.html", "slides": "", "author_site": "Dorian Baudry, Romain Gautron, Emilie Kaufmann, Odalric-Ambrym Maillard", "author": "Dorian Baudry; Romain Gautron; Emilie Kaufmann; Odalric Maillard", "abstract": "In this paper we study a multi-arm bandit problem in which the quality of each arm is measured by the Conditional Value at Risk (CVaR) at some level alpha of the reward distribution. While existing works in this setting mainly focus on Upper Confidence Bound algorithms, we introduce a new Thompson Sampling approach for CVaR bandits on bounded rewards that is flexible enough to solve a variety of problems grounded on physical resources. Building on a recent work by Riou & Honda (2020), we introduce B-CVTS for continuous bounded rewards and M-CVTS for multinomial distributions. On the theoretical side, we provide a non-trivial extension of their analysis that enables to theoretically bound their CVaR regret minimization performance. Strikingly, our results show that these strategies are the first to provably achieve asymptotic optimality in CVaR bandits, matching the corresponding asymptotic lower bounds for this setting. 
Further, we illustrate empirically the benefit of Thompson Sampling approaches both in a realistic environment simulating a use-case in agriculture and on various synthetic examples.", "bibtex": "@InProceedings{pmlr-v139-baudry21a,\n title = \t {Optimal Thompson Sampling strategies for support-aware CVaR bandits},\n author = {Baudry, Dorian and Gautron, Romain and Kaufmann, Emilie and Maillard, Odalric},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {716--726},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/baudry21a/baudry21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/baudry21a.html},\n abstract = \t {In this paper we study a multi-arm bandit problem in which the quality of each arm is measured by the Conditional Value at Risk (CVaR) at some level alpha of the reward distribution. While existing works in this setting mainly focus on Upper Confidence Bound algorithms, we introduce a new Thompson Sampling approach for CVaR bandits on bounded rewards that is flexible enough to solve a variety of problems grounded on physical resources. Building on a recent work by Riou & Honda (2020), we introduce B-CVTS for continuous bounded rewards and M-CVTS for multinomial distributions. On the theoretical side, we provide a non-trivial extension of their analysis that enables to theoretically bound their CVaR regret minimization performance. Strikingly, our results show that these strategies are the first to provably achieve asymptotic optimality in CVaR bandits, matching the corresponding asymptotic lower bounds for this setting. Further, we illustrate empirically the benefit of Thompson Sampling approaches both in a realistic environment simulating a use-case in agriculture and on various synthetic examples.}\n}", "pdf": "http://proceedings.mlr.press/v139/baudry21a/baudry21a.pdf", "supp": "", "pdf_size": 3172553, "gs_citation": 47, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13964455175632716086&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Univ. Lille, CNRS, Inria, Centrale Lille, UMR 9198-CRIStAL, F-59000 Lille, France+CGIAR Platform for Big Data in Agriculture, Alliance of CIAT and Bioversity International, Km 17 Recta Cali-Palmira, Apartado A\u00e9reo 6713, Cali, Colombia; CIRAD, UPR AIDA, F-34398 Montpellier, France; Univ. Lille, CNRS, Inria, Centrale Lille, UMR 9198-CRIStAL, F-59000 Lille, France; Univ. Lille, CNRS, Inria, Centrale Lille, UMR 9198-CRIStAL, F-59000 Lille, France", "aff_domain": "inria.fr; ; ; ", "email": "inria.fr; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/baudry21a.html", "aff_unique_index": "0+1;2;0;0", "aff_unique_norm": "University of Lille;CGIAR Platform for Big Data in Agriculture;CIRAD", "aff_unique_dep": "UMR 9198-CRIStAL;Alliance of CIAT and Bioversity International;UPR AIDA", "aff_unique_url": "https://www.univ-lille.fr;;", "aff_unique_abbr": "Univ. 
Lille;;", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Lille;", "aff_country_unique_index": "0+1;0;0;0", "aff_country_unique": "France;Colombia" }, { "title": "Optimal Transport Kernels for Sequential and Parallel Neural Architecture Search", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9843", "id": "9843", "proceeding": "http://proceedings.mlr.press/v139/nguyen21d.html", "slides": "", "author_site": "Vu Nguyen, Tam Le, Makoto Yamada, Michael A Osborne", "author": "Vu Nguyen; Tam Le; Makoto Yamada; Michael A. Osborne", "abstract": "Neural architecture search (NAS) automates the design of deep neural networks. One of the main challenges in searching complex and non-continuous architectures is to compare the similarity of networks that the conventional Euclidean metric may fail to capture. Optimal transport (OT) is resilient to such complex structure by considering the minimal cost for transporting a network into another. However, the OT is generally not negative definite which may limit its ability to build the positive-definite kernels required in many kernel-dependent frameworks. Building upon tree-Wasserstein (TW), which is a negative definite variant of OT, we develop a novel discrepancy for neural architectures, and demonstrate it within a Gaussian process surrogate model for the sequential NAS settings. Furthermore, we derive a novel parallel NAS, using quality k-determinantal point process on the GP posterior, to select diverse and high-performing architectures from a discrete set of candidates. Empirically, we demonstrate that our TW-based approaches outperform other baselines in both sequential and parallel NAS.", "bibtex": "@InProceedings{pmlr-v139-nguyen21d,\n title = \t {Optimal Transport Kernels for Sequential and Parallel Neural Architecture Search},\n author = {Nguyen, Vu and Le, Tam and Yamada, Makoto and Osborne, Michael A.},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8084--8095},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nguyen21d/nguyen21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/nguyen21d.html},\n abstract = \t {Neural architecture search (NAS) automates the design of deep neural networks. One of the main challenges in searching complex and non-continuous architectures is to compare the similarity of networks that the conventional Euclidean metric may fail to capture. Optimal transport (OT) is resilient to such complex structure by considering the minimal cost for transporting a network into another. However, the OT is generally not negative definite which may limit its ability to build the positive-definite kernels required in many kernel-dependent frameworks. Building upon tree-Wasserstein (TW), which is a negative definite variant of OT, we develop a novel discrepancy for neural architectures, and demonstrate it within a Gaussian process surrogate model for the sequential NAS settings. Furthermore, we derive a novel parallel NAS, using quality k-determinantal point process on the GP posterior, to select diverse and high-performing architectures from a discrete set of candidates. 
Empirically, we demonstrate that our TW-based approaches outperform other baselines in both sequential and parallel NAS.}\n}", "pdf": "http://proceedings.mlr.press/v139/nguyen21d/nguyen21d.pdf", "supp": "", "pdf_size": 1422701, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12662732608463413645&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Amazon Adelaide (work done prior to joining Amazon); RIKEN AIP; Kyoto University; University of Oxford", "aff_domain": "ieee.org; ; ; ", "email": "ieee.org; ; ; ", "github": "", "project": "https://www.automl.org/automl/literature-on-neural-architecture-search", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/nguyen21d.html", "aff_unique_index": "0;1;2;3", "aff_unique_norm": "Amazon;RIKEN;Kyoto University;University of Oxford", "aff_unique_dep": "Amazon;Advanced Institute for Computational Science;;", "aff_unique_url": "https://www.amazon.com;https://www.aip.riken.jp;https://www.kyoto-u.ac.jp;https://www.ox.ac.uk", "aff_unique_abbr": "Amazon;RIKEN AIP;Kyoto U;Oxford", "aff_campus_unique_index": "0", "aff_campus_unique": "Adelaide;", "aff_country_unique_index": "0;1;1;2", "aff_country_unique": "Australia;Japan;United Kingdom" }, { "title": "Optimal regret algorithm for Pseudo-1d Bandit Convex Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10431", "id": "10431", "proceeding": "http://proceedings.mlr.press/v139/saha21c.html", "slides": "/media/icml-2021/Slides/10431.pdf", "author_site": "Aadirupa Saha, Nagarajan Natarajan, Praneeth Netrapalli, Prateek Jain", "author": "Aadirupa Saha; Nagarajan Natarajan; Praneeth Netrapalli; Prateek Jain", "abstract": "We study online learning with bandit feedback (i.e. learner has access to only zeroth-order oracle) where cost/reward functions $\\f_t$ admit a \"pseudo-1d\" structure, i.e. $\\f_t(\\w) = \\loss_t(\\pred_t(\\w))$ where the output of $\\pred_t$ is one-dimensional. At each round, the learner observes context $\\x_t$, plays prediction $\\pred_t(\\w_t; \\x_t)$ (e.g. $\\pred_t(\\cdot)=\u27e8\\x_t, \\cdot\u27e9$) for some $\\w_t \\in \\mathbb{R}^d$ and observes loss $\\loss_t(\\pred_t(\\w_t))$ where $\\loss_t$ is a convex Lipschitz-continuous function. The goal is to minimize the standard regret metric. This pseudo-1d bandit convex optimization problem (\\SBCO) arises frequently in domains such as online decision-making or parameter-tuning in large systems. For this problem, we first show a regret lower bound of $\\min(\\sqrt{dT}, T^{3/4})$ for any algorithm, where $T$ is the number of rounds. We propose a new algorithm \\sbcalg that combines randomized online gradient descent with a kernelized exponential weights method to exploit the pseudo-1d structure effectively, guaranteeing the {\\em optimal} regret bound mentioned above, up to additional logarithmic factors. 
In contrast, applying state-of-the-art online convex optimization methods leads to $\\tilde{O}\\left(\\min\\left(d^{9.5}\\sqrt{T},\\sqrt{d}T^{3/4}\\right)\\right)$ regret, that is significantly suboptimal in terms of $d$.", "bibtex": "@InProceedings{pmlr-v139-saha21c,\n title = \t {Optimal regret algorithm for Pseudo-1d Bandit Convex Optimization},\n author = {Saha, Aadirupa and Natarajan, Nagarajan and Netrapalli, Praneeth and Jain, Prateek},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9255--9264},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/saha21c/saha21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/saha21c.html},\n abstract = \t {We study online learning with bandit feedback (i.e. learner has access to only zeroth-order oracle) where cost/reward functions $\\f_t$ admit a \"pseudo-1d\" structure, i.e. $\\f_t(\\w) = \\loss_t(\\pred_t(\\w))$ where the output of $\\pred_t$ is one-dimensional. At each round, the learner observes context $\\x_t$, plays prediction $\\pred_t(\\w_t; \\x_t)$ (e.g. $\\pred_t(\\cdot)=\u27e8\\x_t, \\cdot\u27e9$) for some $\\w_t \\in \\mathbb{R}^d$ and observes loss $\\loss_t(\\pred_t(\\w_t))$ where $\\loss_t$ is a convex Lipschitz-continuous function. The goal is to minimize the standard regret metric. This pseudo-1d bandit convex optimization problem (\\SBCO) arises frequently in domains such as online decision-making or parameter-tuning in large systems. For this problem, we first show a regret lower bound of $\\min(\\sqrt{dT}, T^{3/4})$ for any algorithm, where $T$ is the number of rounds. We propose a new algorithm \\sbcalg that combines randomized online gradient descent with a kernelized exponential weights method to exploit the pseudo-1d structure effectively, guaranteeing the {\\em optimal} regret bound mentioned above, up to additional logarithmic factors. 
In contrast, applying state-of-the-art online convex optimization methods leads to $\\tilde{O}\\left(\\min\\left(d^{9.5}\\sqrt{T},\\sqrt{d}T^{3/4}\\right)\\right)$ regret, that is significantly suboptimal in terms of $d$.}\n}", "pdf": "http://proceedings.mlr.press/v139/saha21c/saha21c.pdf", "supp": "", "pdf_size": 600362, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8077993119229741751&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Microsoft Research, New York City; Microsoft Research, India; Microsoft Research, India + Google Research, India; Microsoft Research, India + Google Research, India", "aff_domain": "microsoft.com; ; ; ", "email": "microsoft.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/saha21c.html", "aff_unique_index": "0;0;0+1;0+1", "aff_unique_norm": "Microsoft;Google", "aff_unique_dep": "Microsoft Research;Google Research", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://research.google", "aff_unique_abbr": "MSR;Google Research", "aff_campus_unique_index": "0;2;2", "aff_campus_unique": "New York City;;India", "aff_country_unique_index": "0;1;1+1;1+1", "aff_country_unique": "United States;India" }, { "title": "Optimization Planning for 3D ConvNets", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8521", "id": "8521", "proceeding": "http://proceedings.mlr.press/v139/qiu21c.html", "slides": "", "author_site": "Zhaofan Qiu, Ting Yao, Chong-Wah Ngo, Tao Mei", "author": "Zhaofan Qiu; Ting Yao; Chong-Wah Ngo; Tao Mei", "abstract": "It is not trivial to optimally learn a 3D Convolutional Neural Networks (3D ConvNets) due to high complexity and various options of the training scheme. The most common hand-tuning process starts from learning 3D ConvNets using short video clips and then is followed by learning long-term temporal dependency using lengthy clips, while gradually decaying the learning rate from high to low as training progresses. The fact that such process comes along with several heuristic settings motivates the study to seek an optimal \"path\" to automate the entire training. In this paper, we decompose the path into a series of training \"states\" and specify the hyper-parameters, e.g., learning rate and the length of input clips, in each state. The estimation of the knee point on the performance-epoch curve triggers the transition from one state to another. We perform dynamic programming over all the candidate states to plan the optimal permutation of states, i.e., optimization path. Furthermore, we devise a new 3D ConvNets with a unique design of dual-head classifier to improve spatial and temporal discrimination. Extensive experiments on seven public video recognition benchmarks demonstrate the advantages of our proposal. With the optimization planning, our 3D ConvNets achieves superior results when comparing to the state-of-the-art recognition methods. 
More remarkably, we obtain the top-1 accuracy of 80.5% and 82.7% on Kinetics-400 and Kinetics-600 datasets, respectively.", "bibtex": "@InProceedings{pmlr-v139-qiu21c,\n title = \t {Optimization Planning for 3D ConvNets},\n author = {Qiu, Zhaofan and Yao, Ting and Ngo, Chong-Wah and Mei, Tao},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8726--8736},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/qiu21c/qiu21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/qiu21c.html},\n abstract = \t {It is not trivial to optimally learn a 3D Convolutional Neural Networks (3D ConvNets) due to high complexity and various options of the training scheme. The most common hand-tuning process starts from learning 3D ConvNets using short video clips and then is followed by learning long-term temporal dependency using lengthy clips, while gradually decaying the learning rate from high to low as training progresses. The fact that such process comes along with several heuristic settings motivates the study to seek an optimal \"path\" to automate the entire training. In this paper, we decompose the path into a series of training \"states\" and specify the hyper-parameters, e.g., learning rate and the length of input clips, in each state. The estimation of the knee point on the performance-epoch curve triggers the transition from one state to another. We perform dynamic programming over all the candidate states to plan the optimal permutation of states, i.e., optimization path. Furthermore, we devise a new 3D ConvNets with a unique design of dual-head classifier to improve spatial and temporal discrimination. Extensive experiments on seven public video recognition benchmarks demonstrate the advantages of our proposal. With the optimization planning, our 3D ConvNets achieves superior results when comparing to the state-of-the-art recognition methods. 
More remarkably, we obtain the top-1 accuracy of 80.5% and 82.7% on Kinetics-400 and Kinetics-600 datasets, respectively.}\n}", "pdf": "http://proceedings.mlr.press/v139/qiu21c/qiu21c.pdf", "supp": "", "pdf_size": 7612681, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17965785653886460675&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "JD AI Research, Beijing, China; JD AI Research, Beijing, China; School of Computing and Information Systems, Singapore Management University, Singapore; JD AI Research, Beijing, China", "aff_domain": "gmail.com; ; ; ", "email": "gmail.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/qiu21c.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "JD;Singapore Management University", "aff_unique_dep": "JD AI Research;School of Computing and Information Systems", "aff_unique_url": ";https://www.smu.edu.sg", "aff_unique_abbr": ";SMU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Beijing;", "aff_country_unique_index": "0;0;1;0", "aff_country_unique": "China;Singapore" }, { "title": "Optimization of Graph Neural Networks: Implicit Acceleration by Skip Connections and More Depth", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8619", "id": "8619", "proceeding": "http://proceedings.mlr.press/v139/xu21k.html", "slides": "/media/icml-2021/Slides/8619.pdf", "author_site": "Keyulu Xu, Mozhi Zhang, Stefanie Jegelka, Kenji Kawaguchi", "author": "Keyulu Xu; Mozhi Zhang; Stefanie Jegelka; Kenji Kawaguchi", "abstract": "Graph Neural Networks (GNNs) have been studied through the lens of expressive power and generalization. However, their optimization properties are less well understood. We take the first step towards analyzing GNN training by studying the gradient dynamics of GNNs. First, we analyze linearized GNNs and prove that despite the non-convexity of training, convergence to a global minimum at a linear rate is guaranteed under mild assumptions that we validate on real-world graphs. Second, we study what may affect the GNNs\u2019 training speed. Our results show that the training of GNNs is implicitly accelerated by skip connections, more depth, and/or a good label distribution. Empirical results confirm that our theoretical results for linearized GNNs align with the training behavior of nonlinear GNNs. Our results provide the first theoretical support for the success of GNNs with skip connections in terms of optimization, and suggest that deep GNNs with skip connections would be promising in practice.", "bibtex": "@InProceedings{pmlr-v139-xu21k,\n title = \t {Optimization of Graph Neural Networks: Implicit Acceleration by Skip Connections and More Depth},\n author = {Xu, Keyulu and Zhang, Mozhi and Jegelka, Stefanie and Kawaguchi, Kenji},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11592--11602},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xu21k/xu21k.pdf},\n url = \t {https://proceedings.mlr.press/v139/xu21k.html},\n abstract = \t {Graph Neural Networks (GNNs) have been studied through the lens of expressive power and generalization. However, their optimization properties are less well understood. 
We take the first step towards analyzing GNN training by studying the gradient dynamics of GNNs. First, we analyze linearized GNNs and prove that despite the non-convexity of training, convergence to a global minimum at a linear rate is guaranteed under mild assumptions that we validate on real-world graphs. Second, we study what may affect the GNNs\u2019 training speed. Our results show that the training of GNNs is implicitly accelerated by skip connections, more depth, and/or a good label distribution. Empirical results confirm that our theoretical results for linearized GNNs align with the training behavior of nonlinear GNNs. Our results provide the first theoretical support for the success of GNNs with skip connections in terms of optimization, and suggest that deep GNNs with skip connections would be promising in practice.}\n}", "pdf": "http://proceedings.mlr.press/v139/xu21k/xu21k.pdf", "supp": "", "pdf_size": 520189, "gs_citation": 107, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15846309199730043238&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Massachusetts Institute of Technology (MIT)+The University of Maryland; The University of Maryland; Massachusetts Institute of Technology (MIT); Harvard University", "aff_domain": "mit.edu; ;mit.edu;fas.harvard.edu", "email": "mit.edu; ;mit.edu;fas.harvard.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/xu21k.html", "aff_unique_index": "0+1;1;0;2", "aff_unique_norm": "Massachusetts Institute of Technology;University of Maryland;Harvard University", "aff_unique_dep": ";;", "aff_unique_url": "https://web.mit.edu;https://www/umd.edu;https://www.harvard.edu", "aff_unique_abbr": "MIT;UMD;Harvard", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0;0", "aff_country_unique": "United States" }, { "title": "Optimizing Black-box Metrics with Iterative Example Weighting", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9325", "id": "9325", "proceeding": "http://proceedings.mlr.press/v139/hiranandani21a.html", "slides": "/media/icml-2021/Slides/9325.pdf", "author_site": "Gaurush Hiranandani, Jatin Mathur, Harikrishna Narasimhan, Mahdi Milani Fard, Sanmi Koyejo", "author": "Gaurush Hiranandani; Jatin Mathur; Harikrishna Narasimhan; Mahdi Milani Fard; Sanmi Koyejo", "abstract": "We consider learning to optimize a classification metric defined by a black-box function of the confusion matrix. Such black-box learning settings are ubiquitous, for example, when the learner only has query access to the metric of interest, or in noisy-label and domain adaptation applications where the learner must evaluate the metric via performance evaluation using a small validation sample. Our approach is to adaptively learn example weights on the training dataset such that the resulting weighted objective best approximates the metric on the validation sample. We show how to model and estimate the example weights and use them to iteratively post-shift a pre-trained class probability estimator to construct a classifier. We also analyze the resulting procedure\u2019s statistical properties. 
Experiments on various label noise, domain shift, and fair classification setups confirm that our proposal compares favorably to the state-of-the-art baselines for each application.", "bibtex": "@InProceedings{pmlr-v139-hiranandani21a,\n title = \t {Optimizing Black-box Metrics with Iterative Example Weighting},\n author = {Hiranandani, Gaurush and Mathur, Jatin and Narasimhan, Harikrishna and Fard, Mahdi Milani and Koyejo, Sanmi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4239--4249},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hiranandani21a/hiranandani21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hiranandani21a.html},\n abstract = \t {We consider learning to optimize a classification metric defined by a black-box function of the confusion matrix. Such black-box learning settings are ubiquitous, for example, when the learner only has query access to the metric of interest, or in noisy-label and domain adaptation applications where the learner must evaluate the metric via performance evaluation using a small validation sample. Our approach is to adaptively learn example weights on the training dataset such that the resulting weighted objective best approximates the metric on the validation sample. We show how to model and estimate the example weights and use them to iteratively post-shift a pre-trained class probability estimator to construct a classifier. We also analyze the resulting procedure\u2019s statistical properties. Experiments on various label noise, domain shift, and fair classification setups confirm that our proposal compares favorably to the state-of-the-art baselines for each application.}\n}", "pdf": "http://proceedings.mlr.press/v139/hiranandani21a/hiranandani21a.pdf", "supp": "", "pdf_size": 1238323, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2459105363066716864&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign; Google Research; Google Research; Google Research", "aff_domain": "illinois.edu; ; ; ; ", "email": "illinois.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/hiranandani21a.html", "aff_unique_index": "0;0;1;1;1", "aff_unique_norm": "University of Illinois Urbana-Champaign;Google", "aff_unique_dep": ";Google Research", "aff_unique_url": "https://illinois.edu;https://research.google", "aff_unique_abbr": "UIUC;Google Research", "aff_campus_unique_index": "0;0;1;1;1", "aff_campus_unique": "Urbana-Champaign;Mountain View", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Optimizing persistent homology based functions", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10265", "id": "10265", "proceeding": "http://proceedings.mlr.press/v139/carriere21a.html", "slides": "", "author_site": "Mathieu Carri\u00e8re, Frederic Chazal, Marc Glisse, Yuichi Ike, Hariprasad Kannan, Yuhei Umeda", "author": "Mathieu Carriere; Frederic Chazal; Marc Glisse; Yuichi Ike; Hariprasad Kannan; Yuhei Umeda", "abstract": "Solving optimization tasks based on functions and losses with a topological flavor is a very active and growing field of research in data science 
and Topological Data Analysis, with applications in non-convex optimization, statistics and machine learning. However, the approaches proposed in the literature are usually anchored to a specific application and/or topological construction, and do not come with theoretical guarantees. To address this issue, we study the differentiability of a general map associated with the most common topological construction, that is, the persistence map. Building on real analytic geometry arguments, we propose a general framework that allows us to define and compute gradients for persistence-based functions in a very simple way. We also provide a simple, explicit and sufficient condition for convergence of stochastic subgradient methods for such functions. This result encompasses all the constructions and applications of topological optimization in the literature. Finally, we provide associated code, that is easy to handle and to mix with other non-topological methods and constraints, as well as some experiments showcasing the versatility of our approach.", "bibtex": "@InProceedings{pmlr-v139-carriere21a,\n title = \t {Optimizing persistent homology based functions},\n author = {Carriere, Mathieu and Chazal, Frederic and Glisse, Marc and Ike, Yuichi and Kannan, Hariprasad and Umeda, Yuhei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1294--1303},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/carriere21a/carriere21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/carriere21a.html},\n abstract = \t {Solving optimization tasks based on functions and losses with a topological flavor is a very active and growing field of research in data science and Topological Data Analysis, with applications in non-convex optimization, statistics and machine learning. However, the approaches proposed in the literature are usually anchored to a specific application and/or topological construction, and do not come with theoretical guarantees. To address this issue, we study the differentiability of a general map associated with the most common topological construction, that is, the persistence map. Building on real analytic geometry arguments, we propose a general framework that allows us to define and compute gradients for persistence-based functions in a very simple way. We also provide a simple, explicit and sufficient condition for convergence of stochastic subgradient methods for such functions. This result encompasses all the constructions and applications of topological optimization in the literature. 
Finally, we provide associated code, that is easy to handle and to mix with other non-topological methods and constraints, as well as some experiments showcasing the versatility of our approach.}\n}", "pdf": "http://proceedings.mlr.press/v139/carriere21a/carriere21a.pdf", "supp": "", "pdf_size": 541156, "gs_citation": 85, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1795374418354954800&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Université Côte d'Azur, Inria, France; Université Paris-Saclay, CNRS, Inria, Laboratoire de Mathématiques d'Orsay, France; Université Paris-Saclay, CNRS, Inria, Laboratoire de Mathématiques d'Orsay, France; Fujitsu Ltd., Kanagawa, Japan; Université Paris-Saclay, CNRS, Inria, Laboratoire de Mathématiques d'Orsay, France; Fujitsu Ltd., Kanagawa, Japan", "aff_domain": "inria.fr; ; ; ; ; ", "email": "inria.fr; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/carriere21a.html", "aff_unique_index": "0;1;1;2;1;2", "aff_unique_norm": "Universit\u00e9 C\u00f4te d'Azur;Universit\u00e9 Paris-Saclay;Fujitsu Limited", "aff_unique_dep": ";Laboratoire de Math\u00e9matiques d'Orsay;", "aff_unique_url": "https://www.univ-cotedazur.fr;https://www.universite-paris-saclay.fr;https://www.fujitsu.com", "aff_unique_abbr": ";;Fujitsu", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;1;0;1", "aff_country_unique": "France;Japan" }, { "title": "Order Matters: Probabilistic Modeling of Node Sequence for Graph Generation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8963", "id": "8963", "proceeding": "http://proceedings.mlr.press/v139/chen21j.html", "slides": "/media/icml-2021/Slides/8963_1Jyx6OI.pdf", "author_site": "Xiaohui Chen, Xu Han, Jiajing Hu, Francisco Ruiz, Liping Liu", "author": "Xiaohui Chen; Xu Han; Jiajing Hu; Francisco Ruiz; Liping Liu", "abstract": "A graph generative model defines a distribution over graphs. Typically, the model consists of a sequential process that creates and adds nodes and edges. Such sequential process defines an ordering of the nodes in the graph. The computation of the model\u2019s likelihood requires to marginalize the node orderings; this makes maximum likelihood estimation (MLE) challenging due to the (factorial) number of possible permutations. In this work, we provide an expression for the likelihood of a graph generative model and show that its calculation is closely related to the problem of graph automorphism. In addition, we derive a variational inference (VI) algorithm for fitting a graph generative model that is based on the maximization of a variational bound of the log-likelihood. This allows the model to be trained with node orderings from the approximate posterior instead of ad-hoc orderings. Our experiments show that our log-likelihood bound is significantly tighter than the bound of previous schemes.
The models fitted with the VI algorithm are able to generate high-quality graphs that match the structures of target graphs not seen during training.", "bibtex": "@InProceedings{pmlr-v139-chen21j,\n title = \t {Order Matters: Probabilistic Modeling of Node Sequence for Graph Generation},\n author = {Chen, Xiaohui and Han, Xu and Hu, Jiajing and Ruiz, Francisco and Liu, Liping},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1630--1639},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21j/chen21j.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21j.html},\n abstract = \t {A graph generative model defines a distribution over graphs. Typically, the model consists of a sequential process that creates and adds nodes and edges. Such sequential process defines an ordering of the nodes in the graph. The computation of the model\u2019s likelihood requires to marginalize the node orderings; this makes maximum likelihood estimation (MLE) challenging due to the (factorial) number of possible permutations. In this work, we provide an expression for the likelihood of a graph generative model and show that its calculation is closely related to the problem of graph automorphism. In addition, we derive a variational inference (VI) algorithm for fitting a graph generative model that is based on the maximization of a variational bound of the log-likelihood. This allows the model to be trained with node orderings from the approximate posterior instead of ad-hoc orderings. Our experiments show that our log-likelihood bound is significantly tighter than the bound of previous schemes. The models fitted with the VI algorithm are able to generate high-quality graphs that match the structures of target graphs not seen during training.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21j/chen21j.pdf", "supp": "", "pdf_size": 7071664, "gs_citation": 35, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10391803537150156085&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, Tufts University; Department of Computer Science, Tufts University; Department of Computer Science, Tufts University; DeepMind; Department of Computer Science, Tufts University", "aff_domain": "tufts.edu; ; ; ; ", "email": "tufts.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/chen21j.html", "aff_unique_index": "0;0;0;1;0", "aff_unique_norm": "Tufts University;DeepMind", "aff_unique_dep": "Department of Computer Science;", "aff_unique_url": "https://www.tufts.edu;https://deepmind.com", "aff_unique_abbr": "Tufts;DeepMind", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;1;0", "aff_country_unique": "United States;United Kingdom" }, { "title": "Order-Agnostic Cross Entropy for Non-Autoregressive Machine Translation", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8931", "id": "8931", "proceeding": "http://proceedings.mlr.press/v139/du21c.html", "slides": "", "author_site": "Cunxiao Du, Zhaopeng Tu, Jing Jiang", "author": "Cunxiao Du; Zhaopeng Tu; Jing Jiang", "abstract": "We propose a new training objective named order-agnostic cross entropy (OaXE) for fully non-autoregressive translation (NAT) models. 
OaXE improves the standard cross-entropy loss to ameliorate the effect of word reordering, which is a common source of the critical multimodality problem in NAT. Concretely, OaXE removes the penalty for word order errors, and computes the cross entropy loss based on the best possible alignment between model predictions and target tokens. Since the log loss is very sensitive to invalid references, we leverage cross entropy initialization and loss truncation to ensure the model focuses on a good part of the search space. Extensive experiments on major WMT benchmarks demonstrate that OaXE substantially improves translation performance, setting new state of the art for fully NAT models. Further analyses show that OaXE indeed alleviates the multimodality problem by reducing token repetitions and increasing prediction confidence. Our code, data, and trained models are available at https://github.com/tencent-ailab/ICML21_OAXE.", "bibtex": "@InProceedings{pmlr-v139-du21c,\n title = \t {Order-Agnostic Cross Entropy for Non-Autoregressive Machine Translation},\n author = {Du, Cunxiao and Tu, Zhaopeng and Jiang, Jing},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2849--2859},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/du21c/du21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/du21c.html},\n abstract = \t {We propose a new training objective named order-agnostic cross entropy (OaXE) for fully non-autoregressive translation (NAT) models. OaXE improves the standard cross-entropy loss to ameliorate the effect of word reordering, which is a common source of the critical multimodality problem in NAT. Concretely, OaXE removes the penalty for word order errors, and computes the cross entropy loss based on the best possible alignment between model predictions and target tokens. Since the log loss is very sensitive to invalid references, we leverage cross entropy initialization and loss truncation to ensure the model focuses on a good part of the search space. Extensive experiments on major WMT benchmarks demonstrate that OaXE substantially improves translation performance, setting new state of the art for fully NAT models. Further analyses show that OaXE indeed alleviates the multimodality problem by reducing token repetitions and increasing prediction confidence. 
Our code, data, and trained models are available at https://github.com/tencent-ailab/ICML21_OAXE.}\n}", "pdf": "http://proceedings.mlr.press/v139/du21c/du21c.pdf", "supp": "", "pdf_size": 899916, "gs_citation": 89, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10622606881880564341&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "School of Computing and Information System, Singapore Management University, Singapore + Tencent AI Lab; Tencent AI Lab, China; School of Computing and Information System, Singapore Management University, Singapore", "aff_domain": "smu.edu.sg;tencent.com;smu.edu.sg", "email": "smu.edu.sg;tencent.com;smu.edu.sg", "github": "https://github.com/tencent-ailab/ICML21_OAXE", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/du21c.html", "aff_unique_index": "0+1;1;0", "aff_unique_norm": "Singapore Management University;Tencent", "aff_unique_dep": "School of Computing and Information System;Tencent AI Lab", "aff_unique_url": "https://www.smu.edu.sg;https://ai.tencent.com", "aff_unique_abbr": "SMU;Tencent AI Lab", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+1;1;0", "aff_country_unique": "Singapore;China" }, { "title": "Out-of-Distribution Generalization via Risk Extrapolation (REx)", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9185", "id": "9185", "proceeding": "http://proceedings.mlr.press/v139/krueger21a.html", "slides": "", "author_site": "David Krueger, Ethan Caballero, Joern-Henrik Jacobsen, Amy Zhang, Jonathan Binas, Dinghuai Zhang, Remi Le Priol, Aaron Courville", "author": "David Krueger; Ethan Caballero; Joern-Henrik Jacobsen; Amy Zhang; Jonathan Binas; Dinghuai Zhang; Remi Le Priol; Aaron Courville", "abstract": "Distributional shift is one of the major obstacles when transferring machine learning prediction systems from the lab to the real world. To tackle this problem, we assume that variation across training domains is representative of the variation we might encounter at test time, but also that shifts at test time may be more extreme in magnitude. In particular, we show that reducing differences in risk across training domains can reduce a model\u2019s sensitivity to a wide range of extreme distributional shifts, including the challenging setting where the input contains both causal and anti-causal elements. We motivate this approach, Risk Extrapolation (REx), as a form of robust optimization over a perturbation set of extrapolated domains (MM-REx), and propose a penalty on the variance of training risks (V-REx) as a simpler variant. We prove that variants of REx can recover the causal mechanisms of the targets, while also providing robustness to changes in the input distribution (\u201ccovariate shift\u201d). 
By appropriately trading-off robustness to causally induced distributional shifts and covariate shift, REx is able to outperform alternative methods such as Invariant Risk Minimization in situations where these types of shift co-occur.", "bibtex": "@InProceedings{pmlr-v139-krueger21a,\n title = \t {Out-of-Distribution Generalization via Risk Extrapolation (REx)},\n author = {Krueger, David and Caballero, Ethan and Jacobsen, Joern-Henrik and Zhang, Amy and Binas, Jonathan and Zhang, Dinghuai and Priol, Remi Le and Courville, Aaron},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5815--5826},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/krueger21a/krueger21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/krueger21a.html},\n abstract = \t {Distributional shift is one of the major obstacles when transferring machine learning prediction systems from the lab to the real world. To tackle this problem, we assume that variation across training domains is representative of the variation we might encounter at test time, but also that shifts at test time may be more extreme in magnitude. In particular, we show that reducing differences in risk across training domains can reduce a model\u2019s sensitivity to a wide range of extreme distributional shifts, including the challenging setting where the input contains both causal and anti-causal elements. We motivate this approach, Risk Extrapolation (REx), as a form of robust optimization over a perturbation set of extrapolated domains (MM-REx), and propose a penalty on the variance of training risks (V-REx) as a simpler variant. We prove that variants of REx can recover the causal mechanisms of the targets, while also providing robustness to changes in the input distribution (\u201ccovariate shift\u201d). 
By appropriately trading-off robustness to causally induced distributional shifts and covariate shift, REx is able to outperform alternative methods such as Invariant Risk Minimization in situations where these types of shift co-occur.}\n}", "pdf": "http://proceedings.mlr.press/v139/krueger21a/krueger21a.pdf", "supp": "", "pdf_size": 738509, "gs_citation": 1103, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10054528338033032937&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Mila+University of Montreal; Mila+University of Montreal; Vector+University of Toronto; Mila+McGill University+Facebook AI Research; Mila+University of Montreal; Mila+University of Montreal; Mila+University of Montreal; Mila+University of Montreal", "aff_domain": "gmail.com; ; ; ; ; ; ; ", "email": "gmail.com; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/krueger21a.html", "aff_unique_index": "0+1;0+1;2+3;0+4+5;0+1;0+1;0+1;0+1", "aff_unique_norm": "Mila;University of Montreal;Vector Institute;University of Toronto;McGill University;Meta", "aff_unique_dep": "Quebec Artificial Intelligence Institute;;;;;Facebook AI Research", "aff_unique_url": "https://mila.quebec;https://wwwumontreal.ca;https://vectorinstitute.ai/;https://www.utoronto.ca;https://www.mcgill.ca;https://research.facebook.com", "aff_unique_abbr": "Mila;UM;Vector;U of T;McGill;FAIR", "aff_campus_unique_index": ";;;;;;;", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0+0;0+0;0+0+1;0+0;0+0;0+0;0+0", "aff_country_unique": "Canada;United States" }, { "title": "Outlier-Robust Optimal Transport", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9841", "id": "9841", "proceeding": "http://proceedings.mlr.press/v139/mukherjee21a.html", "slides": "", "author_site": "Debarghya Mukherjee, Aritra Guha, Justin Solomon, Yuekai Sun, Mikhail Yurochkin", "author": "Debarghya Mukherjee; Aritra Guha; Justin M Solomon; Yuekai Sun; Mikhail Yurochkin", "abstract": "Optimal transport (OT) measures distances between distributions in a way that depends on the geometry of the sample space. In light of recent advances in computational OT, OT distances are widely used as loss functions in machine learning. Despite their prevalence and advantages, OT loss functions can be extremely sensitive to outliers. In fact, a single adversarially-picked outlier can increase the standard $W_2$-distance arbitrarily. To address this issue, we propose an outlier-robust formulation of OT. Our formulation is convex but challenging to scale at a first glance. Our main contribution is deriving an \\emph{equivalent} formulation based on cost truncation that is easy to incorporate into modern algorithms for computational OT. 
We demonstrate the benefits of our formulation in mean estimation problems under the Huber contamination model in simulations and outlier detection tasks on real data.", "bibtex": "@InProceedings{pmlr-v139-mukherjee21a,\n title = \t {Outlier-Robust Optimal Transport},\n author = {Mukherjee, Debarghya and Guha, Aritra and Solomon, Justin M and Sun, Yuekai and Yurochkin, Mikhail},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7850--7860},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/mukherjee21a/mukherjee21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/mukherjee21a.html},\n abstract = \t {Optimal transport (OT) measures distances between distributions in a way that depends on the geometry of the sample space. In light of recent advances in computational OT, OT distances are widely used as loss functions in machine learning. Despite their prevalence and advantages, OT loss functions can be extremely sensitive to outliers. In fact, a single adversarially-picked outlier can increase the standard $W_2$-distance arbitrarily. To address this issue, we propose an outlier-robust formulation of OT. Our formulation is convex but challenging to scale at a first glance. Our main contribution is deriving an \\emph{equivalent} formulation based on cost truncation that is easy to incorporate into modern algorithms for computational OT. We demonstrate the benefits of our formulation in mean estimation problems under the Huber contamination model in simulations and outlier detection tasks on real data.}\n}", "pdf": "http://proceedings.mlr.press/v139/mukherjee21a/mukherjee21a.pdf", "supp": "", "pdf_size": 1837013, "gs_citation": 78, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5412338136880257864&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Department of Statistics, University of Michigan+MIT-IBM Watson AI Lab; Department of Statistical Science, Duke University; MIT CSAIL+MIT-IBM Watson AI Lab; Department of Statistics, University of Michigan; IBM Research+MIT-IBM Watson AI Lab", "aff_domain": "umich.edu; ; ; ; ", "email": "umich.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/mukherjee21a.html", "aff_unique_index": "0+1;2;1+1;0;3+1", "aff_unique_norm": "University of Michigan;Massachusetts Institute of Technology;Duke University;IBM", "aff_unique_dep": "Department of Statistics;IBM Watson AI Lab;Department of Statistical Science;IBM Research", "aff_unique_url": "https://www.umich.edu;https://www.mitibmwatsonailab.org;https://www.duke.edu;https://www.ibm.com/research", "aff_unique_abbr": "UM;MIT-IBM AI Lab;Duke;IBM", "aff_campus_unique_index": "0;2;0;", "aff_campus_unique": "Ann Arbor;;Cambridge", "aff_country_unique_index": "0+0;0;0+0;0;0+0", "aff_country_unique": "United States" }, { "title": "Outside the Echo Chamber: Optimizing the Performative Risk", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9029", "id": "9029", "proceeding": "http://proceedings.mlr.press/v139/miller21a.html", "slides": "", "author_site": "John Miller, Juan Perdomo, Tijana Zrnic", "author": "John P Miller; Juan C Perdomo; Tijana Zrnic", "abstract": "In performative prediction, predictions guide decision-making and hence can influence the distribution 
of future data. To date, work on performative prediction has focused on finding performatively stable models, which are the fixed points of repeated retraining. However, stable solutions can be far from optimal when evaluated in terms of the performative risk, the loss experienced by the decision maker when deploying a model. In this paper, we shift attention beyond performative stability and focus on optimizing the performative risk directly. We identify a natural set of properties of the loss function and model-induced distribution shift under which the performative risk is convex, a property which does not follow from convexity of the loss alone. Furthermore, we develop algorithms that leverage our structural assumptions to optimize the performative risk with better sample efficiency than generic methods for derivative-free convex optimization.", "bibtex": "@InProceedings{pmlr-v139-miller21a,\n title = \t {Outside the Echo Chamber: Optimizing the Performative Risk},\n author = {Miller, John P and Perdomo, Juan C and Zrnic, Tijana},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7710--7720},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/miller21a/miller21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/miller21a.html},\n abstract = \t {In performative prediction, predictions guide decision-making and hence can influence the distribution of future data. To date, work on performative prediction has focused on finding performatively stable models, which are the fixed points of repeated retraining. However, stable solutions can be far from optimal when evaluated in terms of the performative risk, the loss experienced by the decision maker when deploying a model. In this paper, we shift attention beyond performative stability and focus on optimizing the performative risk directly. We identify a natural set of properties of the loss function and model-induced distribution shift under which the performative risk is convex, a property which does not follow from convexity of the loss alone. 
Furthermore, we develop algorithms that leverage our structural assumptions to optimize the performative risk with better sample efficiency than generic methods for derivative-free convex optimization.}\n}", "pdf": "http://proceedings.mlr.press/v139/miller21a/miller21a.pdf", "supp": "", "pdf_size": 348183, "gs_citation": 125, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2857099299697829438&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 6, "aff": "University of California, Berkeley; University of California, Berkeley; University of California, Berkeley", "aff_domain": "berkeley.edu;berkeley.edu;berkeley.edu", "email": "berkeley.edu;berkeley.edu;berkeley.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/miller21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Overcoming Catastrophic Forgetting by Bayesian Generative Regularization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9223", "id": "9223", "proceeding": "http://proceedings.mlr.press/v139/chen21v.html", "slides": "", "author_site": "PEI-HUNG Chen, Wei Wei, Cho-Jui Hsieh, Bo Dai", "author": "Pei-Hung Chen; Wei Wei; Cho-Jui Hsieh; Bo Dai", "abstract": "In this paper, we propose a new method to overcome catastrophic forgetting by adding generative regularization to the Bayesian inference framework. The Bayesian method provides a general framework for continual learning. We further construct a generative regularization term for all given classification models by leveraging energy-based models and Langevin dynamics sampling to enrich the features learned in each task. By combining discriminative and generative loss together, we empirically show that the proposed method outperforms state-of-the-art methods on a variety of tasks, avoiding catastrophic forgetting in continual learning. In particular, the proposed method outperforms baseline methods by over 15% on the Fashion-MNIST dataset and 10% on the CUB dataset.", "bibtex": "@InProceedings{pmlr-v139-chen21v,\n title = \t {Overcoming Catastrophic Forgetting by Bayesian Generative Regularization},\n author = {Chen, Pei-Hung and Wei, Wei and Hsieh, Cho-Jui and Dai, Bo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1760--1770},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21v/chen21v.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21v.html},\n abstract = \t {In this paper, we propose a new method to overcome catastrophic forgetting by adding generative regularization to the Bayesian inference framework. The Bayesian method provides a general framework for continual learning. We further construct a generative regularization term for all given classification models by leveraging energy-based models and Langevin dynamics sampling to enrich the features learned in each task.
By combining discriminative and generative loss together, we empirically show that the proposed method outperforms state-of-the-art methods on a variety of tasks, avoiding catastrophic forgetting in continual learning. In particular, the proposed method outperforms baseline methods by over 15% on the Fashion-MNIST dataset and 10% on the CUB dataset.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21v/chen21v.pdf", "supp": "", "pdf_size": 573077, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4555230603344300328&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, UCLA, California, USA; Google Cloud, Sunnyvale, California, USA; Department of Computer Science, UCLA, California, USA; Google Brain, Mountain View, California, USA", "aff_domain": "g.ucla.edu; ; ; ", "email": "g.ucla.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/chen21v.html", "aff_unique_index": "0;1;0;1", "aff_unique_norm": "University of California, Los Angeles;Google", "aff_unique_dep": "Department of Computer Science;Google Cloud", "aff_unique_url": "https://www.ucla.edu;https://cloud.google.com", "aff_unique_abbr": "UCLA;Google Cloud", "aff_campus_unique_index": "0;1;0;2", "aff_campus_unique": "Los Angeles;Sunnyvale;Mountain View", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "PAC-Learning for Strategic Classification", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9137", "id": "9137", "proceeding": "http://proceedings.mlr.press/v139/sundaram21a.html", "slides": "", "author_site": "Ravi Sundaram, Anil Vullikanti, Haifeng Xu, Fan Yao", "author": "Ravi Sundaram; Anil Vullikanti; Haifeng Xu; Fan Yao", "abstract": "The study of strategic or adversarial manipulation of testing data to fool a classifier has attracted much recent attention. Most previous works have focused on two extreme situations where any testing data point either is completely adversarial or always equally prefers the positive label. In this paper, we generalize both of these through a unified framework for strategic classification and introduce the notion of strategic VC-dimension (SVC) to capture the PAC-learnability in our general strategic setup. SVC provably generalizes the recent concept of adversarial VC-dimension (AVC) introduced by Cullina et al. (2018). We instantiate our framework for the fundamental strategic linear classification problem. We fully characterize: (1) the statistical learnability of linear classifiers by pinning down its SVC; (2) its computational tractability by pinning down the complexity of the empirical risk minimization problem. Interestingly, the SVC of linear classifiers is always upper bounded by its standard VC-dimension.
This characterization also strictly generalizes the AVC bound for linear classifiers in (Cullina et al., 2018).", "bibtex": "@InProceedings{pmlr-v139-sundaram21a,\n title = \t {PAC-Learning for Strategic Classification},\n author = {Sundaram, Ravi and Vullikanti, Anil and Xu, Haifeng and Yao, Fan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9978--9988},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sundaram21a/sundaram21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/sundaram21a.html},\n abstract = \t {The study of strategic or adversarial manipulation of testing data to fool a classifier has attracted much recent attention. Most previous works have focused on two extreme situations where any testing data point either is completely adversarial or always equally prefers the positive label. In this paper, we generalize both of these through a unified framework for strategic classification and introduce the notion of strategic VC-dimension (SVC) to capture the PAC-learnability in our general strategic setup. SVC provably generalizes the recent concept of adversarial VC-dimension (AVC) introduced by Cullina et al. (2018). We instantiate our framework for the fundamental strategic linear classification problem. We fully characterize: (1) the statistical learnability of linear classifiers by pinning down its SVC; (2) it\u2019s computational tractability by pinning down the complexity of the empirical risk minimization problem. Interestingly, the SVC of linear classifiers is always upper bounded by its standard VC-dimension. 
This characterization also strictly generalizes the AVC bound for linear classifiers in (Cullina et al., 2018).}\n}", "pdf": "http://proceedings.mlr.press/v139/sundaram21a/sundaram21a.pdf", "supp": "", "pdf_size": 622405, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5170756776165921289&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Khoury College of Computer Science, Northeastern University, Boston, MA 02115; Department of Computer Science, University of Virginia, Charlottesville, VA 22904 + Biocomplexity Institute and Initiative, University of Virginia, Charlottesville, VA 22904; Department of Computer Science, University of Virginia, Charlottesville, VA 22904; Department of Computer Science, University of Virginia, Charlottesville, VA 22904", "aff_domain": "virginia.edu;virginia.edu;virginia.edu;virginia.edu", "email": "virginia.edu;virginia.edu;virginia.edu;virginia.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/sundaram21a.html", "aff_unique_index": "0;1+1;1;1", "aff_unique_norm": "Northeastern University;University of Virginia", "aff_unique_dep": "Khoury College of Computer Science;Department of Computer Science", "aff_unique_url": "https://www.northeastern.edu;https://www.virginia.edu", "aff_unique_abbr": "NU;UVA", "aff_campus_unique_index": "0;1+1;1;1", "aff_campus_unique": "Boston;Charlottesville", "aff_country_unique_index": "0;0+0;0;0", "aff_country_unique": "United States" }, { "title": "PACOH: Bayes-Optimal Meta-Learning with PAC-Guarantees", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10421", "id": "10421", "proceeding": "http://proceedings.mlr.press/v139/rothfuss21a.html", "slides": "/media/icml-2021/Slides/10421.pdf", "author_site": "Jonas Rothfuss, Vincent Fortuin, Martin Josifoski, Andreas Krause", "author": "Jonas Rothfuss; Vincent Fortuin; Martin Josifoski; Andreas Krause", "abstract": "Meta-learning can successfully acquire useful inductive biases from data. Yet, its generalization properties to unseen learning tasks are poorly understood. Particularly if the number of meta-training tasks is small, this raises concerns about overfitting. We provide a theoretical analysis using the PAC-Bayesian framework and derive novel generalization bounds for meta-learning. Using these bounds, we develop a class of PAC-optimal meta-learning algorithms with performance guarantees and a principled meta-level regularization. Unlike previous PAC-Bayesian meta-learners, our method results in a standard stochastic optimization problem which can be solved efficiently and scales well.When instantiating our PAC-optimal hyper-posterior (PACOH) with Gaussian processes and Bayesian Neural Networks as base learners, the resulting methods yield state-of-the-art performance, both in terms of predictive accuracy and the quality of uncertainty estimates. 
Thanks to their principled treatment of uncertainty, our meta-learners can also be successfully employed for sequential decision problems.", "bibtex": "@InProceedings{pmlr-v139-rothfuss21a,\n title = \t {PACOH: Bayes-Optimal Meta-Learning with PAC-Guarantees},\n author = {Rothfuss, Jonas and Fortuin, Vincent and Josifoski, Martin and Krause, Andreas},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9116--9126},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/rothfuss21a/rothfuss21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/rothfuss21a.html},\n abstract = \t {Meta-learning can successfully acquire useful inductive biases from data. Yet, its generalization properties to unseen learning tasks are poorly understood. Particularly if the number of meta-training tasks is small, this raises concerns about overfitting. We provide a theoretical analysis using the PAC-Bayesian framework and derive novel generalization bounds for meta-learning. Using these bounds, we develop a class of PAC-optimal meta-learning algorithms with performance guarantees and a principled meta-level regularization. Unlike previous PAC-Bayesian meta-learners, our method results in a standard stochastic optimization problem which can be solved efficiently and scales well.When instantiating our PAC-optimal hyper-posterior (PACOH) with Gaussian processes and Bayesian Neural Networks as base learners, the resulting methods yield state-of-the-art performance, both in terms of predictive accuracy and the quality of uncertainty estimates. Thanks to their principled treatment of uncertainty, our meta-learners can also be successfully employed for sequential decision problems.}\n}", "pdf": "http://proceedings.mlr.press/v139/rothfuss21a/rothfuss21a.pdf", "supp": "", "pdf_size": 1335365, "gs_citation": 141, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12050746952935759142&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "1ETH Zurich, Switzerland; 1ETH Zurich, Switzerland; 2EPFL, Switzerland; 1ETH Zurich, Switzerland", "aff_domain": "inf.ethz.ch; ; ; ", "email": "inf.ethz.ch; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/rothfuss21a.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "ETH Zurich;Ecole Polytechnique Federale de Lausanne", "aff_unique_dep": ";", "aff_unique_url": "https://www.ethz.ch;https://www.epfl.ch", "aff_unique_abbr": "ETHZ;EPFL", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Switzerland" }, { "title": "PAGE: A Simple and Optimal Probabilistic Gradient Estimator for Nonconvex Optimization", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8449", "id": "8449", "proceeding": "http://proceedings.mlr.press/v139/li21a.html", "slides": "/media/icml-2021/Slides/8449.pdf", "author_site": "Zhize Li, Hongyan Bao, Xiangliang Zhang, Peter Richtarik", "author": "Zhize Li; Hongyan Bao; Xiangliang Zhang; Peter Richtarik", "abstract": "In this paper, we propose a novel stochastic gradient estimator\u2014ProbAbilistic Gradient Estimator (PAGE)\u2014for nonconvex optimization. 
PAGE is easy to implement as it is designed via a small adjustment to vanilla SGD: in each iteration, PAGE uses the vanilla minibatch SGD update with probability $p_t$ or reuses the previous gradient with a small adjustment, at a much lower computational cost, with probability $1-p_t$. We give a simple formula for the optimal choice of $p_t$. Moreover, we prove the first tight lower bound $\\Omega(n+\\frac{\\sqrt{n}}{\\epsilon^2})$ for nonconvex finite-sum problems, which also leads to a tight lower bound $\\Omega(b+\\frac{\\sqrt{b}}{\\epsilon^2})$ for nonconvex online problems, where $b:= \\min\\{\\frac{\\sigma^2}{\\epsilon^2}, n\\}$. Then, we show that PAGE obtains the optimal convergence results $O(n+\\frac{\\sqrt{n}}{\\epsilon^2})$ (finite-sum) and $O(b+\\frac{\\sqrt{b}}{\\epsilon^2})$ (online) matching our lower bounds for both nonconvex finite-sum and online problems. Besides, we also show that for nonconvex functions satisfying the Polyak-\u0141{ojasiewicz} (PL) condition, PAGE can automatically switch to a faster linear convergence rate $O(\\cdot\\log \\frac{1}{\\epsilon})$. Finally, we conduct several deep learning experiments (e.g., LeNet, VGG, ResNet) on real datasets in PyTorch showing that PAGE not only converges much faster than SGD in training but also achieves the higher test accuracy, validating the optimal theoretical results and confirming the practical superiority of PAGE.", "bibtex": "@InProceedings{pmlr-v139-li21a,\n title = \t {PAGE: A Simple and Optimal Probabilistic Gradient Estimator for Nonconvex Optimization},\n author = {Li, Zhize and Bao, Hongyan and Zhang, Xiangliang and Richtarik, Peter},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6286--6295},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21a/li21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21a.html},\n abstract = \t {In this paper, we propose a novel stochastic gradient estimator\u2014ProbAbilistic Gradient Estimator (PAGE)\u2014for nonconvex optimization. PAGE is easy to implement as it is designed via a small adjustment to vanilla SGD: in each iteration, PAGE uses the vanilla minibatch SGD update with probability $p_t$ or reuses the previous gradient with a small adjustment, at a much lower computational cost, with probability $1-p_t$. We give a simple formula for the optimal choice of $p_t$. Moreover, we prove the first tight lower bound $\\Omega(n+\\frac{\\sqrt{n}}{\\epsilon^2})$ for nonconvex finite-sum problems, which also leads to a tight lower bound $\\Omega(b+\\frac{\\sqrt{b}}{\\epsilon^2})$ for nonconvex online problems, where $b:= \\min\\{\\frac{\\sigma^2}{\\epsilon^2}, n\\}$. Then, we show that PAGE obtains the optimal convergence results $O(n+\\frac{\\sqrt{n}}{\\epsilon^2})$ (finite-sum) and $O(b+\\frac{\\sqrt{b}}{\\epsilon^2})$ (online) matching our lower bounds for both nonconvex finite-sum and online problems. Besides, we also show that for nonconvex functions satisfying the Polyak-\u0141{ojasiewicz} (PL) condition, PAGE can automatically switch to a faster linear convergence rate $O(\\cdot\\log \\frac{1}{\\epsilon})$. 
Finally, we conduct several deep learning experiments (e.g., LeNet, VGG, ResNet) on real datasets in PyTorch showing that PAGE not only converges much faster than SGD in training but also achieves the higher test accuracy, validating the optimal theoretical results and confirming the practical superiority of PAGE.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21a/li21a.pdf", "supp": "", "pdf_size": 1243799, "gs_citation": 160, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9149815351596567733&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "King Abdullah University of Science and Technology; King Abdullah University of Science and Technology; King Abdullah University of Science and Technology; King Abdullah University of Science and Technology", "aff_domain": "kaust.edu.sa; ; ; ", "email": "kaust.edu.sa; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/li21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "King Abdullah University of Science and Technology", "aff_unique_dep": "", "aff_unique_url": "https://www.kast.kau.edu.sa", "aff_unique_abbr": "KAUST", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Saudi Arabia" }, { "title": "PAPRIKA: Private Online False Discovery Rate Control", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8425", "id": "8425", "proceeding": "http://proceedings.mlr.press/v139/zhang21j.html", "slides": "/media/icml-2021/Slides/8425.pdf", "author_site": "Wanrong Zhang, Gautam Kamath, Rachel Cummings", "author": "Wanrong Zhang; Gautam Kamath; Rachel Cummings", "abstract": "In hypothesis testing, a \\emph{false discovery} occurs when a hypothesis is incorrectly rejected due to noise in the sample. When adaptively testing multiple hypotheses, the probability of a false discovery increases as more tests are performed. Thus the problem of \\emph{False Discovery Rate (FDR) control} is to find a procedure for testing multiple hypotheses that accounts for this effect in determining the set of hypotheses to reject. The goal is to minimize the number (or fraction) of false discoveries, while maintaining a high true positive rate (i.e., correct discoveries). In this work, we study False Discovery Rate (FDR) control in multiple hypothesis testing under the constraint of differential privacy for the sample. Unlike previous work in this direction, we focus on the \\emph{online setting}, meaning that a decision about each hypothesis must be made immediately after the test is performed, rather than waiting for the output of all tests as in the offline setting. We provide new private algorithms based on state-of-the-art results in non-private online FDR control. Our algorithms have strong provable guarantees for privacy and statistical performance as measured by FDR and power. 
We also provide experimental results to demonstrate the efficacy of our algorithms in a variety of data environments.", "bibtex": "@InProceedings{pmlr-v139-zhang21j,\n title = \t {PAPRIKA: Private Online False Discovery Rate Control},\n author = {Zhang, Wanrong and Kamath, Gautam and Cummings, Rachel},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12458--12467},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21j/zhang21j.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21j.html},\n abstract = \t {In hypothesis testing, a \\emph{false discovery} occurs when a hypothesis is incorrectly rejected due to noise in the sample. When adaptively testing multiple hypotheses, the probability of a false discovery increases as more tests are performed. Thus the problem of \\emph{False Discovery Rate (FDR) control} is to find a procedure for testing multiple hypotheses that accounts for this effect in determining the set of hypotheses to reject. The goal is to minimize the number (or fraction) of false discoveries, while maintaining a high true positive rate (i.e., correct discoveries). In this work, we study False Discovery Rate (FDR) control in multiple hypothesis testing under the constraint of differential privacy for the sample. Unlike previous work in this direction, we focus on the \\emph{online setting}, meaning that a decision about each hypothesis must be made immediately after the test is performed, rather than waiting for the output of all tests as in the offline setting. We provide new private algorithms based on state-of-the-art results in non-private online FDR control. Our algorithms have strong provable guarantees for privacy and statistical performance as measured by FDR and power. We also provide experimental results to demonstrate the efficacy of our algorithms in a variety of data environments.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21j/zhang21j.pdf", "supp": "", "pdf_size": 8918691, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16053819406696763043&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "H. Milton Stewart School of Industrial and Systems Engineering, Georgia Institute of Technology, Atlanta, GA, USA; Cheriton School of Computer Science, University of Waterloo, Waterloo, ON, Canada; Department of Industrial Engineering and Operations Research, Columbia University, New York, NY, USA", "aff_domain": "gatech.edu;csail.mit.edu;columbia.edu", "email": "gatech.edu;csail.mit.edu;columbia.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zhang21j.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Georgia Institute of Technology;University of Waterloo;Columbia University", "aff_unique_dep": "H. 
Milton Stewart School of Industrial and Systems Engineering;Cheriton School of Computer Science;Department of Industrial Engineering and Operations Research", "aff_unique_url": "https://www.gatech.edu;https://uwaterloo.ca;https://www.columbia.edu", "aff_unique_abbr": "Georgia Tech;UW;Columbia", "aff_campus_unique_index": "0;1;2", "aff_campus_unique": "Atlanta;Waterloo;New York", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United States;Canada" }, { "title": "PC-MLP: Model-based Reinforcement Learning with Policy Cover Guided Exploration", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9705", "id": "9705", "proceeding": "http://proceedings.mlr.press/v139/song21b.html", "slides": "", "author_site": "Yuda Song, Wen Sun", "author": "Yuda Song; Wen Sun", "abstract": "Model-based Reinforcement Learning (RL) is a popular learning paradigm due to its potential sample efficiency compared to model-free RL. However, existing empirical model-based RL approaches lack the ability to explore. This work studies a computationally and statistically efficient model-based algorithm for both Kernelized Nonlinear Regulators (KNR) and linear Markov Decision Processes (MDPs). For both models, our algorithm guarantees polynomial sample complexity and only uses access to a planning oracle. Experimentally, we first demonstrate the flexibility and the efficacy of our algorithm on a set of exploration challenging control tasks where existing empirical model-based RL approaches completely fail. We then show that our approach retains excellent performance even in common dense reward control benchmarks that do not require heavy exploration.", "bibtex": "@InProceedings{pmlr-v139-song21b,\n title = \t {PC-MLP: Model-based Reinforcement Learning with Policy Cover Guided Exploration},\n author = {Song, Yuda and Sun, Wen},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9801--9811},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/song21b/song21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/song21b.html},\n abstract = \t {Model-based Reinforcement Learning (RL) is a popular learning paradigm due to its potential sample efficiency compared to model-free RL. However, existing empirical model-based RL approaches lack the ability to explore. This work studies a computationally and statistically efficient model-based algorithm for both Kernelized Nonlinear Regulators (KNR) and linear Markov Decision Processes (MDPs). For both models, our algorithm guarantees polynomial sample complexity and only uses access to a planning oracle. Experimentally, we first demonstrate the flexibility and the efficacy of our algorithm on a set of exploration challenging control tasks where existing empirical model-based RL approaches completely fail. 
We then show that our approach retains excellent performance even in common dense reward control benchmarks that do not require heavy exploration.}\n}", "pdf": "http://proceedings.mlr.press/v139/song21b/song21b.pdf", "supp": "", "pdf_size": 3338298, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8561706312159715447&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Machine Learning Department, Carnegie Mellon University, Pittsburgh, USA; Department of Computer Science, Cornell University, Ithaca, USA", "aff_domain": "andrew.cmu.edu; ", "email": "andrew.cmu.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/song21b.html", "aff_unique_index": "0;2", "aff_unique_norm": "Carnegie Mellon University;;Cornell University", "aff_unique_dep": "Machine Learning Department;;Department of Computer Science", "aff_unique_url": "https://www.cmu.edu;;https://www.cornell.edu", "aff_unique_abbr": "CMU;;Cornell", "aff_campus_unique_index": "0;2", "aff_campus_unique": "Pittsburgh;;Ithaca", "aff_country_unique_index": "0;0", "aff_country_unique": "United States;" }, { "title": "PEBBLE: Feedback-Efficient Interactive Reinforcement Learning via Relabeling Experience and Unsupervised Pre-training", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8487", "id": "8487", "proceeding": "http://proceedings.mlr.press/v139/lee21i.html", "slides": "", "author_site": "Kimin Lee, Laura Smith, Pieter Abbeel", "author": "Kimin Lee; Laura M Smith; Pieter Abbeel", "abstract": "Conveying complex objectives to reinforcement learning (RL) agents can often be difficult, involving meticulous design of reward functions that are sufficiently informative yet easy enough to provide. Human-in-the-loop RL methods allow practitioners to instead interactively teach agents through tailored feedback; however, such approaches have been challenging to scale since human feedback is very expensive. In this work, we aim to make this process more sample- and feedback-efficient. We present an off-policy, interactive RL algorithm that capitalizes on the strengths of both feedback and off-policy learning. Specifically, we learn a reward model by actively querying a teacher\u2019s preferences between two clips of behavior and use it to train an agent. To enable off-policy learning, we relabel all the agent\u2019s past experience when its reward model changes. We additionally show that pre-training our agents with unsupervised exploration substantially increases the mileage of its queries. We demonstrate that our approach is capable of learning tasks of higher complexity than previously considered by human-in-the-loop methods, including a variety of locomotion and robotic manipulation skills.
We also show that our method is able to utilize real-time human feedback to effectively prevent reward exploitation and learn new behaviors that are difficult to specify with standard reward functions.", "bibtex": "@InProceedings{pmlr-v139-lee21i,\n title = \t {PEBBLE: Feedback-Efficient Interactive Reinforcement Learning via Relabeling Experience and Unsupervised Pre-training},\n author = {Lee, Kimin and Smith, Laura M and Abbeel, Pieter},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6152--6163},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lee21i/lee21i.pdf},\n url = \t {https://proceedings.mlr.press/v139/lee21i.html},\n abstract = \t {Conveying complex objectives to reinforcement learning (RL) agents can often be difficult, involving meticulous design of reward functions that are sufficiently informative yet easy enough to provide. Human-in-the-loop RL methods allow practitioners to instead interactively teach agents through tailored feedback; however, such approaches have been challenging to scale since human feedback is very expensive. In this work, we aim to make this process more sample- and feedback-efficient. We present an off-policy, interactive RL algorithm that capitalizes on the strengths of both feedback and off-policy learning. Specifically, we learn a reward model by actively querying a teacher\u2019s preferences between two clips of behavior and use it to train an agent. To enable off-policy learning, we relabel all the agent\u2019s past experience when its reward model changes. We additionally show that pre-training our agents with unsupervised exploration substantially increases the mileage of its queries. We demonstrate that our approach is capable of learning tasks of higher complexity than previously considered by human-in-the-loop methods, including a variety of locomotion and robotic manipulation skills. 
We also show that our method is able to utilize real-time human feedback to effectively prevent reward exploitation and learn new behaviors that are difficult to specify with standard reward functions.}\n}", "pdf": "http://proceedings.mlr.press/v139/lee21i/lee21i.pdf", "supp": "", "pdf_size": 7190209, "gs_citation": 343, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9254305801075741995&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "University of California, Berkeley; University of California, Berkeley; University of California, Berkeley", "aff_domain": "berkeley.edu;berkeley.edu; ", "email": "berkeley.edu;berkeley.edu; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/lee21i.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "PHEW : Constructing Sparse Networks that Learn Fast and Generalize Well without Training Data", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9917", "id": "9917", "proceeding": "http://proceedings.mlr.press/v139/patil21a.html", "slides": "/media/icml-2021/Slides/9917.pdf", "author_site": "Shreyas Malakarjun Patil, Constantine Dovrolis", "author": "Shreyas Malakarjun Patil; Constantine Dovrolis", "abstract": "Methods that sparsify a network at initialization are important in practice because they greatly improve the efficiency of both learning and inference. Our work is based on a recently proposed decomposition of the Neural Tangent Kernel (NTK) that has decoupled the dynamics of the training process into a data-dependent component and an architecture-dependent kernel {\u2013} the latter referred to as Path Kernel. That work has shown how to design sparse neural networks for faster convergence, without any training data, using the Synflow-L2 algorithm. We first show that even though Synflow-L2 is optimal in terms of convergence, for a given network density, it results in sub-networks with \u201cbottleneck\u201d (narrow) layers {\u2013} leading to poor performance as compared to other data-agnostic methods that use the same number of parameters. Then we propose a new method to construct sparse networks, without any training data, referred to as Paths with Higher-Edge Weights (PHEW). PHEW is a probabilistic network formation method based on biased random walks that only depends on the initial weights. It has similar path kernel properties as Synflow-L2 but it generates much wider layers, resulting in better generalization and performance. 
PHEW achieves significant improvements over the data-independent SynFlow and SynFlow-L2 methods at a wide range of network densities.", "bibtex": "@InProceedings{pmlr-v139-patil21a,\n title = \t {PHEW : Constructing Sparse Networks that Learn Fast and Generalize Well without Training Data},\n author = {Patil, Shreyas Malakarjun and Dovrolis, Constantine},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8432--8442},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/patil21a/patil21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/patil21a.html},\n abstract = \t {Methods that sparsify a network at initialization are important in practice because they greatly improve the efficiency of both learning and inference. Our work is based on a recently proposed decomposition of the Neural Tangent Kernel (NTK) that has decoupled the dynamics of the training process into a data-dependent component and an architecture-dependent kernel {\u2013} the latter referred to as Path Kernel. That work has shown how to design sparse neural networks for faster convergence, without any training data, using the Synflow-L2 algorithm. We first show that even though Synflow-L2 is optimal in terms of convergence, for a given network density, it results in sub-networks with \u201cbottleneck\u201d (narrow) layers {\u2013} leading to poor performance as compared to other data-agnostic methods that use the same number of parameters. Then we propose a new method to construct sparse networks, without any training data, referred to as Paths with Higher-Edge Weights (PHEW). PHEW is a probabilistic network formation method based on biased random walks that only depends on the initial weights. It has similar path kernel properties as Synflow-L2 but it generates much wider layers, resulting in better generalization and performance. 
PHEW achieves significant improvements over the data-independent SynFlow and SynFlow-L2 methods at a wide range of network densities.}\n}", "pdf": "http://proceedings.mlr.press/v139/patil21a/patil21a.pdf", "supp": "", "pdf_size": 1076513, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16443738349013297627&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "School of Computer Science, Georgia Institute of Technology, USA; School of Computer Science, Georgia Institute of Technology, USA", "aff_domain": "gatech.edu;gatech.edu", "email": "gatech.edu;gatech.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/patil21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Georgia Institute of Technology", "aff_unique_dep": "School of Computer Science", "aff_unique_url": "https://www.gatech.edu", "aff_unique_abbr": "Georgia Tech", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Georgia Tech", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "PID Accelerated Value Iteration Algorithm", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8725", "id": "8725", "proceeding": "http://proceedings.mlr.press/v139/farahmand21a.html", "slides": "/media/icml-2021/Slides/8725_GylPZ7z.pdf", "author_site": "Amir-massoud Farahmand, Mohammad Ghavamzadeh", "author": "Amir-Massoud Farahmand; Mohammad Ghavamzadeh", "abstract": "The convergence rate of Value Iteration (VI), a fundamental procedure in dynamic programming and reinforcement learning, for solving MDPs can be slow when the discount factor is close to one. We propose modifications to VI in order to potentially accelerate its convergence behaviour. The key insight is the realization that the evolution of the value function approximations $(V_k)_{k \\geq 0}$ in the VI procedure can be seen as a dynamical system. This opens up the possibility of using techniques from \\emph{control theory} to modify, and potentially accelerate, this dynamics. We present such modifications based on simple controllers, such as PD (Proportional-Derivative), PI (Proportional-Integral), and PID. We present the error dynamics of these variants of VI, and provably (for certain classes of MDPs) and empirically (for more general classes) show that the convergence rate can be significantly improved. We also propose a gain adaptation mechanism in order to automatically select the controller gains, and empirically show the effectiveness of this procedure.", "bibtex": "@InProceedings{pmlr-v139-farahmand21a,\n title = \t {PID Accelerated Value Iteration Algorithm},\n author = {Farahmand, Amir-Massoud and Ghavamzadeh, Mohammad},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3143--3153},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/farahmand21a/farahmand21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/farahmand21a.html},\n abstract = \t {The convergence rate of Value Iteration (VI), a fundamental procedure in dynamic programming and reinforcement learning, for solving MDPs can be slow when the discount factor is close to one. We propose modifications to VI in order to potentially accelerate its convergence behaviour. 
The key insight is the realization that the evolution of the value function approximations $(V_k)_{k \\geq 0}$ in the VI procedure can be seen as a dynamical system. This opens up the possibility of using techniques from \\emph{control theory} to modify, and potentially accelerate, this dynamics. We present such modifications based on simple controllers, such as PD (Proportional-Derivative), PI (Proportional-Integral), and PID. We present the error dynamics of these variants of VI, and provably (for certain classes of MDPs) and empirically (for more general classes) show that the convergence rate can be significantly improved. We also propose a gain adaptation mechanism in order to automatically select the controller gains, and empirically show the effectiveness of this procedure.}\n}", "pdf": "http://proceedings.mlr.press/v139/farahmand21a/farahmand21a.pdf", "supp": "", "pdf_size": 1336881, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4533174792810297448&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Vector Institute, Toronto, Canada+Department of Computer Science, University of Toronto, Canada; Google Research, Mountain View, California, USA", "aff_domain": "vectorinstitute.ai; ", "email": "vectorinstitute.ai; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/farahmand21a.html", "aff_unique_index": "0+1;2", "aff_unique_norm": "Vector Institute;University of Toronto;Google", "aff_unique_dep": ";Department of Computer Science;Google Research", "aff_unique_url": "https://vectorinstitute.ai;https://www.utoronto.ca;https://research.google", "aff_unique_abbr": "Vector Institute;U of T;Google", "aff_campus_unique_index": "0;2", "aff_campus_unique": "Toronto;;Mountain View", "aff_country_unique_index": "0+0;1", "aff_country_unique": "Canada;United States" }, { "title": "PODS: Policy Optimization via Differentiable Simulation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8497", "id": "8497", "proceeding": "http://proceedings.mlr.press/v139/mora21a.html", "slides": "/media/icml-2021/Slides/8497.pdf", "author_site": "Miguel Angel Zamora Mora, Momchil Peychev, Sehoon Ha, Martin Vechev, Stelian Coros", "author": "Miguel Angel Zamora Mora; Momchil Peychev; Sehoon Ha; Martin Vechev; Stelian Coros", "abstract": "Current reinforcement learning (RL) methods use simulation models as simple black-box oracles. In this paper, with the goal of improving the performance exhibited by RL algorithms, we explore a systematic way of leveraging the additional information provided by an emerging class of differentiable simulators. Building on concepts established by Deterministic Policy Gradients (DPG) methods, the neural network policies learned with our approach represent deterministic actions. In a departure from standard methodologies, however, learning these policies does not hinge on approximations of the value function that must be learned concurrently in an actor-critic fashion. Instead, we exploit differentiable simulators to directly compute the analytic gradient of a policy\u2019s value function with respect to the actions it outputs. This, in turn, allows us to efficiently perform locally optimal policy improvement iterations. 
Compared against other state-of-the-art RL methods, we show that with minimal hyper-parameter tuning our approach consistently leads to better asymptotic behavior across a set of payload manipulation tasks that demand a high degree of accuracy and precision.", "bibtex": "@InProceedings{pmlr-v139-mora21a,\n title = \t {PODS: Policy Optimization via Differentiable Simulation},\n author = {Mora, Miguel Angel Zamora and Peychev, Momchil and Ha, Sehoon and Vechev, Martin and Coros, Stelian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7805--7817},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/mora21a/mora21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/mora21a.html},\n abstract = \t {Current reinforcement learning (RL) methods use simulation models as simple black-box oracles. In this paper, with the goal of improving the performance exhibited by RL algorithms, we explore a systematic way of leveraging the additional information provided by an emerging class of differentiable simulators. Building on concepts established by Deterministic Policy Gradients (DPG) methods, the neural network policies learned with our approach represent deterministic actions. In a departure from standard methodologies, however, learning these policies does not hinge on approximations of the value function that must be learned concurrently in an actor-critic fashion. Instead, we exploit differentiable simulators to directly compute the analytic gradient of a policy\u2019s value function with respect to the actions it outputs. This, in turn, allows us to efficiently perform locally optimal policy improvement iterations. 
Compared against other state-of-the-art RL methods, we show that with minimal hyper-parameter tuning our approach consistently leads to better asymptotic behavior across a set of payload manipulation tasks that demand a high degree of accuracy and precision.}\n}", "pdf": "http://proceedings.mlr.press/v139/mora21a/mora21a.pdf", "supp": "", "pdf_size": 2151921, "gs_citation": 59, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16993263954160231905&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Department of Computer Science, ETH Zurich, Zurich, Switzerland; Department of Computer Science, ETH Zurich, Zurich, Switzerland; School of Interactive Computing, Georgia Institute of Technology, Georgia, USA; Department of Computer Science, ETH Zurich, Zurich, Switzerland; Department of Computer Science, ETH Zurich, Zurich, Switzerland", "aff_domain": "inf.ethz.ch; ; ; ; ", "email": "inf.ethz.ch; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/mora21a.html", "aff_unique_index": "0;0;1;0;0", "aff_unique_norm": "ETH Zurich;Georgia Institute of Technology", "aff_unique_dep": "Department of Computer Science;School of Interactive Computing", "aff_unique_url": "https://www.ethz.ch;https://www.gatech.edu", "aff_unique_abbr": "ETHZ;Georgia Tech", "aff_campus_unique_index": "0;0;1;0;0", "aff_campus_unique": "Zurich;Georgia", "aff_country_unique_index": "0;0;1;0;0", "aff_country_unique": "Switzerland;United States" }, { "title": "Parallel Droplet Control in MEDA Biochips using Multi-Agent Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10575", "id": "10575", "proceeding": "http://proceedings.mlr.press/v139/liang21c.html", "slides": "", "author_site": "Tung-Che Liang, Jin Zhou, Yun-Sheng Chan, Tsung-Yi Ho, Krishnendu Chakrabarty, Cy Lee", "author": "Tung-Che Liang; Jin Zhou; Yun-Sheng Chan; Tsung-Yi Ho; Krishnendu Chakrabarty; Cy Lee", "abstract": "Microfluidic biochips are being utilized for clinical diagnostics, including COVID-19 testing, because they provide sample-to-result turnaround at low cost. Recently, microelectrode-dot-array (MEDA) biochips have been proposed to advance microfluidics technology. A MEDA biochip manipulates droplets of nano/picoliter volumes to automatically execute biochemical protocols. During bioassay execution, droplets are transported in parallel to achieve high-throughput outcomes. However, a major concern associated with the use of MEDA biochips is microelectrode degradation over time. Recent work has shown that formulating droplet transportation as a reinforcement-learning (RL) problem enables the training of policies to capture the underlying health conditions of microelectrodes and ensure reliable fluidic operations. However, the above RL-based approach suffers from two key limitations: 1) it cannot be used for concurrent transportation of multiple droplets; 2) it requires the availability of CCD cameras for monitoring droplet movement. To overcome these problems, we present a multi-agent reinforcement learning (MARL) droplet-routing solution that can be used for various sizes of MEDA biochips with integrated sensors, and we demonstrate the reliable execution of a serial-dilution bioassay with the MARL droplet router on a fabricated MEDA biochip. 
To facilitate further research, we also present a simulation environment based on the PettingZoo Gym Interface for MARL-guided droplet-routing problems on MEDA biochips.", "bibtex": "@InProceedings{pmlr-v139-liang21c,\n title = \t {Parallel Droplet Control in MEDA Biochips using Multi-Agent Reinforcement Learning},\n author = {Liang, Tung-Che and Zhou, Jin and Chan, Yun-Sheng and Ho, Tsung-Yi and Chakrabarty, Krishnendu and Lee, Cy},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6588--6599},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liang21c/liang21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/liang21c.html},\n abstract = \t {Microfluidic biochips are being utilized for clinical diagnostics, including COVID-19 testing, because they provide sample-to-result turnaround at low cost. Recently, microelectrode-dot-array (MEDA) biochips have been proposed to advance microfluidics technology. A MEDA biochip manipulates droplets of nano/picoliter volumes to automatically execute biochemical protocols. During bioassay execution, droplets are transported in parallel to achieve high-throughput outcomes. However, a major concern associated with the use of MEDA biochips is microelectrode degradation over time. Recent work has shown that formulating droplet transportation as a reinforcement-learning (RL) problem enables the training of policies to capture the underlying health conditions of microelectrodes and ensure reliable fluidic operations. However, the above RL-based approach suffers from two key limitations: 1) it cannot be used for concurrent transportation of multiple droplets; 2) it requires the availability of CCD cameras for monitoring droplet movement. To overcome these problems, we present a multi-agent reinforcement learning (MARL) droplet-routing solution that can be used for various sizes of MEDA biochips with integrated sensors, and we demonstrate the reliable execution of a serial-dilution bioassay with the MARL droplet router on a fabricated MEDA biochip. 
To facilitate further research, we also present a simulation environment based on the PettingZoo Gym Interface for MARL-guided droplet-routing problems on MEDA biochips.}\n}", "pdf": "http://proceedings.mlr.press/v139/liang21c/liang21c.pdf", "supp": "", "pdf_size": 9291462, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4452727534932265030&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Electrical and Computer Engineering, Duke University, Durham, NC, USA; Department of Electrical and Computer Engineering, Duke University, Durham, NC, USA; Department of Electronics Engineering, National Yang Ming Chiao Tung University, Hsinchu, Taiwan; Department of Computer Science, National Tsing Hua University, Hsinchu, Taiwan; Department of Electrical and Computer Engineering, Duke University, Durham, NC, USA; Department of Electronics Engineering, National Yang Ming Chiao Tung University, Hsinchu, Taiwan", "aff_domain": "duke.edu; ; ; ; ; ", "email": "duke.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/liang21c.html", "aff_unique_index": "0;0;1;2;0;1", "aff_unique_norm": "Duke University;National Yang Ming Chiao Tung University;National Tsing Hua University", "aff_unique_dep": "Department of Electrical and Computer Engineering;Department of Electronics Engineering;Department of Computer Science", "aff_unique_url": "https://www.duke.edu;https://www.nctu.edu.tw;https://www.nthu.edu.tw", "aff_unique_abbr": "Duke;NYCU;NTHU", "aff_campus_unique_index": "0;0;1;1;0;1", "aff_campus_unique": "Durham;Taiwan", "aff_country_unique_index": "0;0;1;1;0;1", "aff_country_unique": "United States;China" }, { "title": "Parallel and Flexible Sampling from Autoregressive Models via Langevin Dynamics", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9745", "id": "9745", "proceeding": "http://proceedings.mlr.press/v139/jayaram21b.html", "slides": "", "author_site": "Vivek Jayaram, John Thickstun", "author": "Vivek Jayaram; John Thickstun", "abstract": "This paper introduces an alternative approach to sampling from autoregressive models. Autoregressive models are typically sampled sequentially, according to the transition dynamics defined by the model. Instead, we propose a sampling procedure that initializes a sequence with white noise and follows a Markov chain defined by Langevin dynamics on the global log-likelihood of the sequence. This approach parallelizes the sampling process and generalizes to conditional sampling. Using an autoregressive model as a Bayesian prior, we can steer the output of a generative model using a conditional likelihood or constraints. 
We apply these techniques to autoregressive models in the visual and audio domains, with competitive results for audio source separation, super-resolution, and inpainting.", "bibtex": "@InProceedings{pmlr-v139-jayaram21b,\n title = \t {Parallel and Flexible Sampling from Autoregressive Models via Langevin Dynamics},\n author = {Jayaram, Vivek and Thickstun, John},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4807--4818},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jayaram21b/jayaram21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/jayaram21b.html},\n abstract = \t {This paper introduces an alternative approach to sampling from autoregressive models. Autoregressive models are typically sampled sequentially, according to the transition dynamics defined by the model. Instead, we propose a sampling procedure that initializes a sequence with white noise and follows a Markov chain defined by Langevin dynamics on the global log-likelihood of the sequence. This approach parallelizes the sampling process and generalizes to conditional sampling. Using an autoregressive model as a Bayesian prior, we can steer the output of a generative model using a conditional likelihood or constraints. We apply these techniques to autoregressive models in the visual and audio domains, with competitive results for audio source separation, super-resolution, and inpainting.}\n}", "pdf": "http://proceedings.mlr.press/v139/jayaram21b/jayaram21b.pdf", "supp": "", "pdf_size": 1425485, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6113516044812949338&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, University of Washington; Department of Computer Science, University of Washington", "aff_domain": "cs.washington.edu;cs.washington.edu", "email": "cs.washington.edu;cs.washington.edu", "github": "", "project": "https://grail.cs.washington.edu/projects/pnf-sampling/", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/jayaram21b.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Washington", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.washington.edu", "aff_unique_abbr": "UW", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Seattle", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Parallel tempering on optimized paths", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8923", "id": "8923", "proceeding": "http://proceedings.mlr.press/v139/syed21a.html", "slides": "", "author_site": "Saifuddin Syed, Vittorio Romaniello, Trevor Campbell, Alexandre Bouchard-C\u00f4t\u00e9", "author": "Saifuddin Syed; Vittorio Romaniello; Trevor Campbell; Alexandre Bouchard-Cote", "abstract": "Parallel tempering (PT) is a class of Markov chain Monte Carlo algorithms that constructs a path of distributions annealing between a tractable reference and an intractable target, and then interchanges states along the path to improve mixing in the target. The performance of PT depends on how quickly a sample from the reference distribution makes its way to the target, which in turn depends on the particular path of annealing distributions. 
However, past work on PT has used only simple paths constructed from convex combinations of the reference and target log-densities. This paper begins by demonstrating that this path performs poorly in the setting where the reference and target are nearly mutually singular. To address this issue, we expand the framework of PT to general families of paths, formulate the choice of path as an optimization problem that admits tractable gradient estimates, and propose a flexible new family of spline interpolation paths for use in practice. Theoretical and empirical results both demonstrate that our proposed methodology breaks previously-established upper performance limits for traditional paths.", "bibtex": "@InProceedings{pmlr-v139-syed21a,\n title = \t {Parallel tempering on optimized paths},\n author = {Syed, Saifuddin and Romaniello, Vittorio and Campbell, Trevor and Bouchard-Cote, Alexandre},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10033--10042},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/syed21a/syed21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/syed21a.html},\n abstract = \t {Parallel tempering (PT) is a class of Markov chain Monte Carlo algorithms that constructs a path of distributions annealing between a tractable reference and an intractable target, and then interchanges states along the path to improve mixing in the target. The performance of PT depends on how quickly a sample from the reference distribution makes its way to the target, which in turn depends on the particular path of annealing distributions. However, past work on PT has used only simple paths constructed from convex combinations of the reference and target log-densities. This paper begins by demonstrating that this path performs poorly in the setting where the reference and target are nearly mutually singular. To address this issue, we expand the framework of PT to general families of paths, formulate the choice of path as an optimization problem that admits tractable gradient estimates, and propose a flexible new family of spline interpolation paths for use in practice. 
Theoretical and empirical results both demonstrate that our proposed methodology breaks previously-established upper performance limits for traditional paths.}\n}", "pdf": "http://proceedings.mlr.press/v139/syed21a/syed21a.pdf", "supp": "", "pdf_size": 906183, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14697506612657062549&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 5, "aff": "Department of Statistics, University of British Columbia, Vancouver, Canada; Department of Statistics, University of British Columbia, Vancouver, Canada; Department of Statistics, University of British Columbia, Vancouver, Canada; Department of Statistics, University of British Columbia, Vancouver, Canada", "aff_domain": "stat.ubc.ca;stat.ubc.ca; ; ", "email": "stat.ubc.ca;stat.ubc.ca; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/syed21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of British Columbia", "aff_unique_dep": "Department of Statistics", "aff_unique_url": "https://www.ubc.ca", "aff_unique_abbr": "UBC", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Vancouver", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Canada" }, { "title": "Parallelizing Legendre Memory Unit Training", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9529", "id": "9529", "proceeding": "http://proceedings.mlr.press/v139/chilkuri21a.html", "slides": "", "author_site": "Narsimha Reddy Chilkuri, Chris Eliasmith", "author": "Narsimha Reddy Chilkuri; Chris Eliasmith", "abstract": "Recently, a new recurrent neural network (RNN) named the Legendre Memory Unit (LMU) was proposed and shown to achieve state-of-the-art performance on several benchmark datasets. Here we leverage the linear time-invariant (LTI) memory component of the LMU to construct a simplified variant that can be parallelized during training (and yet executed as an RNN during inference), resulting in up to 200 times faster training. We note that our efficient parallelizing scheme is general and is applicable to any deep network whose recurrent components are linear dynamical systems. We demonstrate the improved accuracy of our new architecture compared to the original LMU and a variety of published LSTM and transformer networks across seven benchmarks. For instance, our LMU sets a new state-of-the-art result on psMNIST, and uses half the parameters while outperforming DistilBERT and LSTM models on IMDB sentiment analysis.", "bibtex": "@InProceedings{pmlr-v139-chilkuri21a,\n title = \t {Parallelizing Legendre Memory Unit Training},\n author = {Chilkuri, Narsimha Reddy and Eliasmith, Chris},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1898--1907},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chilkuri21a/chilkuri21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/chilkuri21a.html},\n abstract = \t {Recently, a new recurrent neural network (RNN) named the Legendre Memory Unit (LMU) was proposed and shown to achieve state-of-the-art performance on several benchmark datasets. 
Here we leverage the linear time-invariant (LTI) memory component of the LMU to construct a simplified variant that can be parallelized during training (and yet executed as an RNN during inference), resulting in up to 200 times faster training. We note that our efficient parallelizing scheme is general and is applicable to any deep network whose recurrent components are linear dynamical systems. We demonstrate the improved accuracy of our new architecture compared to the original LMU and a variety of published LSTM and transformer networks across seven benchmarks. For instance, our LMU sets a new state-of-the-art result on psMNIST, and uses half the parameters while outperforming DistilBERT and LSTM models on IMDB sentiment analysis.}\n}", "pdf": "http://proceedings.mlr.press/v139/chilkuri21a/chilkuri21a.pdf", "supp": "", "pdf_size": 683960, "gs_citation": 46, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1823861922263696681&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Center for Theoretical Neuroscience, University of Waterloo; Center for Theoretical Neuroscience, University of Waterloo + Applied Brain Research", "aff_domain": "gmail.com; ", "email": "gmail.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/chilkuri21a.html", "aff_unique_index": "0;0+1", "aff_unique_norm": "University of Waterloo;Applied Brain Research", "aff_unique_dep": "Center for Theoretical Neuroscience;", "aff_unique_url": "https://uwaterloo.ca;https://www.appliedbrainresearch.com", "aff_unique_abbr": "UW;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0+0", "aff_country_unique": "Canada" }, { "title": "Parameter-free Locally Accelerated Conditional Gradients", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9511", "id": "9511", "proceeding": "http://proceedings.mlr.press/v139/carderera21a.html", "slides": "/media/icml-2021/Slides/9511.pdf", "author_site": "Alejandro Carderera, Jelena Diakonikolas, Cheuk Yin Lin, Sebastian Pokutta", "author": "Alejandro Carderera; Jelena Diakonikolas; Cheuk Yin Lin; Sebastian Pokutta", "abstract": "Projection-free conditional gradient (CG) methods are the algorithms of choice for constrained optimization setups in which projections are often computationally prohibitive but linear optimization over the constraint set remains computationally feasible. Unlike in projection-based methods, globally accelerated convergence rates are in general unattainable for CG. However, a very recent work on Locally accelerated CG (LaCG) has demonstrated that local acceleration for CG is possible for many settings of interest. The main downside of LaCG is that it requires knowledge of the smoothness and strong convexity parameters of the objective function. We remove this limitation by introducing a novel, Parameter-Free Locally accelerated CG (PF-LaCG) algorithm, for which we provide rigorous convergence guarantees. 
Our theoretical results are complemented by numerical experiments, which demonstrate local acceleration and showcase the practical improvements of PF-LaCG over non-accelerated algorithms, both in terms of iteration count and wall-clock time.", "bibtex": "@InProceedings{pmlr-v139-carderera21a,\n title = \t {Parameter-free Locally Accelerated Conditional Gradients},\n author = {Carderera, Alejandro and Diakonikolas, Jelena and Lin, Cheuk Yin and Pokutta, Sebastian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1283--1293},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/carderera21a/carderera21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/carderera21a.html},\n abstract = \t {Projection-free conditional gradient (CG) methods are the algorithms of choice for constrained optimization setups in which projections are often computationally prohibitive but linear optimization over the constraint set remains computationally feasible. Unlike in projection-based methods, globally accelerated convergence rates are in general unattainable for CG. However, a very recent work on Locally accelerated CG (LaCG) has demonstrated that local acceleration for CG is possible for many settings of interest. The main downside of LaCG is that it requires knowledge of the smoothness and strong convexity parameters of the objective function. We remove this limitation by introducing a novel, Parameter-Free Locally accelerated CG (PF-LaCG) algorithm, for which we provide rigorous convergence guarantees. Our theoretical results are complemented by numerical experiments, which demonstrate local acceleration and showcase the practical improvements of PF-LaCG over non-accelerated algorithms, both in terms of iteration count and wall-clock time.}\n}", "pdf": "http://proceedings.mlr.press/v139/carderera21a/carderera21a.pdf", "supp": "", "pdf_size": 2822164, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=307856082838062778&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/carderera21a.html" }, { "title": "Parameterless Transductive Feature Re-representation for Few-Shot Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10079", "id": "10079", "proceeding": "http://proceedings.mlr.press/v139/cui21a.html", "slides": "", "author_site": "Wentao Cui, Yuhong Guo", "author": "Wentao Cui; Yuhong Guo", "abstract": "Recent literature in few-shot learning (FSL) has shown that transductive methods often outperform their inductive counterparts. However, most transductive solutions, particularly the meta-learning based ones, require inserting trainable parameters on top of some inductive baselines to facilitate transduction. In this paper, we propose a parameterless transductive feature re-representation framework that differs from all existing solutions from the following perspectives. (1) It is widely compatible with existing FSL methods, including meta-learning and fine tuning based models. (2) The framework is simple and introduces no extra training parameters when applied to any architecture. 
We conduct experiments on three benchmark datasets by applying the framework to both representative meta-learning baselines and state-of-the-art FSL methods. Our framework consistently improves performances in all experiments and refreshes the state-of-the-art FSL results.", "bibtex": "@InProceedings{pmlr-v139-cui21a,\n title = \t {Parameterless Transductive Feature Re-representation for Few-Shot Learning},\n author = {Cui, Wentao and Guo, Yuhong},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2212--2221},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cui21a/cui21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/cui21a.html},\n abstract = \t {Recent literature in few-shot learning (FSL) has shown that transductive methods often outperform their inductive counterparts. However, most transductive solutions, particularly the meta-learning based ones, require inserting trainable parameters on top of some inductive baselines to facilitate transduction. In this paper, we propose a parameterless transductive feature re-representation framework that differs from all existing solutions from the following perspectives. (1) It is widely compatible with existing FSL methods, including meta-learning and fine tuning based models. (2) The framework is simple and introduces no extra training parameters when applied to any architecture. We conduct experiments on three benchmark datasets by applying the framework to both representative meta-learning baselines and state-of-the-art FSL methods. Our framework consistently improves performances in all experiments and refreshes the state-of-the-art FSL results.}\n}", "pdf": "http://proceedings.mlr.press/v139/cui21a/cui21a.pdf", "supp": "", "pdf_size": 557924, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5706591884073471764&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "School of Computer Science, Carleton University, Canada+Canada CIFAR AI Chair, Amii; School of Computer Science, Carleton University, Canada+Canada CIFAR AI Chair, Amii", "aff_domain": "cmail.carleton.ca;carleton.ca", "email": "cmail.carleton.ca;carleton.ca", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/cui21a.html", "aff_unique_index": "0+1;0+1", "aff_unique_norm": "Carleton University;Amii", "aff_unique_dep": "School of Computer Science;Canada CIFAR AI Chair", "aff_unique_url": "https://carleton.ca;https://amiilabs.ca", "aff_unique_abbr": "Carleton;Amii", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "Canada" }, { "title": "Parametric Graph for Unimodal Ranking Bandit", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9637", "id": "9637", "proceeding": "http://proceedings.mlr.press/v139/gauthier21a.html", "slides": "", "author_site": "Camille-Sovanneary GAUTHIER, Romaric Gaudel, Elisa Fromont, Boammani Aser Lompo", "author": "Camille-Sovanneary Gauthier; Romaric Gaudel; Elisa Fromont; Boammani Aser Lompo", "abstract": "We tackle the online ranking problem of assigning $L$ items to $K$ positions on a web page in order to maximize the number of user clicks. 
We propose an original algorithm, easy to implement and with strong theoretical guarantees to tackle this problem in the Position-Based Model (PBM) setting, well suited for applications where items are displayed on a grid. Besides learning to rank, our algorithm, GRAB (for parametric Graph for unimodal RAnking Bandit), also learns the parameter of a compact graph over permutations of $K$ items among $L$. The logarithmic regret bound of this algorithm is a direct consequence of the unimodality property of the bandit setting with respect to the learned graph. Experiments against state-of-the-art learning algorithms which also tackle the PBM setting, show that our method is more efficient while giving regret performance on par with the best known algorithms on simulated and real life datasets.", "bibtex": "@InProceedings{pmlr-v139-gauthier21a,\n title = \t {Parametric Graph for Unimodal Ranking Bandit},\n author = {Gauthier, Camille-Sovanneary and Gaudel, Romaric and Fromont, Elisa and Lompo, Boammani Aser},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3630--3639},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gauthier21a/gauthier21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/gauthier21a.html},\n abstract = \t {We tackle the online ranking problem of assigning $L$ items to $K$ positions on a web page in order to maximize the number of user clicks. We propose an original algorithm, easy to implement and with strong theoretical guarantees to tackle this problem in the Position-Based Model (PBM) setting, well suited for applications where items are displayed on a grid. Besides learning to rank, our algorithm, GRAB (for parametric Graph for unimodal RAnking Bandit), also learns the parameter of a compact graph over permutations of $K$ items among $L$. The logarithmic regret bound of this algorithm is a direct consequence of the unimodality property of the bandit setting with respect to the learned graph. Experiments against state-of-the-art learning algorithms which also tackle the PBM setting, show that our method is more efficient while giving regret performance on par with the best known algorithms on simulated and real life datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/gauthier21a/gauthier21a.pdf", "supp": "", "pdf_size": 3578796, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11770711752080711321&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Louis Vuitton, F-75001 Paris, France + IRISA UMR 6074 / INRIA rba, F-35000 Rennes, France; Univ Rennes, Ensai, CNRS, CREST - UMR 9194, F-35000 Rennes, France; Univ. 
Rennes 1, F-35000 Rennes, France + Institut Universitaire de France, M.E.S.R.I., F-75231 Paris; ENS Rennes, F-35000 Rennes, France", "aff_domain": "louisvuitton.com; ; ;", "email": "louisvuitton.com; ; ;", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/gauthier21a.html", "aff_unique_index": "0+1;2;3+4;5", "aff_unique_norm": "Louis Vuitton;INRIA;Universite Rennes;University Rennes 1;Institut Universitaire de France;\u00c9cole Normale Sup\u00e9rieure de Rennes", "aff_unique_dep": ";UMR 6074;Ensai, CNRS, CREST - UMR 9194;;;", "aff_unique_url": "https://www.louisvuitton.com;https://www.inria.fr;https://www.univ-rennes1.fr;https://www.univ-rennes1.fr;https://www.iuf.cnrs.fr;https://www.ens-rennes.fr", "aff_unique_abbr": ";INRIA;Univ Rennes;UR1;IUF;ENS Rennes", "aff_campus_unique_index": "1;1;1;1", "aff_campus_unique": ";Rennes", "aff_country_unique_index": "0+0;0;0+0;0", "aff_country_unique": "France" }, { "title": "Pareto GAN: Extending the Representational Power of GANs to Heavy-Tailed Distributions", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10707", "id": "10707", "proceeding": "http://proceedings.mlr.press/v139/huster21a.html", "slides": "", "author_site": "Todd Huster, Jeremy Cohen, Zinan Lin, Kevin Chan, Charles Kamhoua, Nandi O. Leslie, Cho-Yu Chiang, Vyas Sekar", "author": "Todd Huster; Jeremy Cohen; Zinan Lin; Kevin Chan; Charles Kamhoua; Nandi O. Leslie; Cho-Yu Jason Chiang; Vyas Sekar", "abstract": "Generative adversarial networks (GANs) are often billed as \"universal distribution learners\", but precisely what distributions they can represent and learn is still an open question. Heavy-tailed distributions are prevalent in many different domains such as financial risk-assessment, physics, and epidemiology. We observe that existing GAN architectures do a poor job of matching the asymptotic behavior of heavy-tailed distributions, a problem that we show stems from their construction. Additionally, common loss functions produce unstable or near-zero gradients when faced with the infinite moments and large distances between outlier points characteristic of heavy-tailed distributions. We address these problems with the Pareto GAN. A Pareto GAN leverages extreme value theory and the functional properties of neural networks to learn a distribution that matches the asymptotic behavior of the marginal distributions of the features. We identify issues with standard loss functions and propose the use of alternative metric spaces that enable stable and efficient learning. Finally, we evaluate our proposed approach on a variety of heavy-tailed datasets.", "bibtex": "@InProceedings{pmlr-v139-huster21a,\n title = \t {Pareto GAN: Extending the Representational Power of GANs to Heavy-Tailed Distributions},\n author = {Huster, Todd and Cohen, Jeremy and Lin, Zinan and Chan, Kevin and Kamhoua, Charles and Leslie, Nandi O. 
and Chiang, Cho-Yu Jason and Sekar, Vyas},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4523--4532},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/huster21a/huster21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/huster21a.html},\n abstract = \t {Generative adversarial networks (GANs) are often billed as \"universal distribution learners\", but precisely what distributions they can represent and learn is still an open question. Heavy-tailed distributions are prevalent in many different domains such as financial risk-assessment, physics, and epidemiology. We observe that existing GAN architectures do a poor job of matching the asymptotic behavior of heavy-tailed distributions, a problem that we show stems from their construction. Additionally, common loss functions produce unstable or near-zero gradients when faced with the infinite moments and large distances between outlier points characteristic of heavy-tailed distributions. We address these problems with the Pareto GAN. A Pareto GAN leverages extreme value theory and the functional properties of neural networks to learn a distribution that matches the asymptotic behavior of the marginal distributions of the features. We identify issues with standard loss functions and propose the use of alternative metric spaces that enable stable and efficient learning. Finally, we evaluate our proposed approach on a variety of heavy-tailed datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/huster21a/huster21a.pdf", "supp": "", "pdf_size": 669287, "gs_citation": 37, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4193739139775314187&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Perspecta Labs, Basking Ridge, NJ, USA; Perspecta Labs, Basking Ridge, NJ, USA; Carnegie Mellon University, Pittsburgh, Pennsylvania, USA; Army Research Lab, Adelphi, Maryland, USA; Army Research Lab, Adelphi, Maryland, USA; Raytheon Technologies, Adelphi, Maryland, USA; Perspecta Labs, Basking Ridge, NJ, USA; Carnegie Mellon University, Pittsburgh, Pennsylvania, USA", "aff_domain": "perspectalabs.com; ; ; ; ; ; ; ", "email": "perspectalabs.com; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/huster21a.html", "aff_unique_index": "0;0;1;2;2;3;0;1", "aff_unique_norm": "Perspecta Labs;Carnegie Mellon University;Army Research Lab;Raytheon Technologies", "aff_unique_dep": ";;;", "aff_unique_url": ";https://www.cmu.edu;https://www.arl.army.mil;https://www.raytheon.com", "aff_unique_abbr": ";CMU;ARL;RTX", "aff_campus_unique_index": "0;0;1;2;2;0;1", "aff_campus_unique": "Basking Ridge;Pittsburgh;Adelphi;", "aff_country_unique_index": "0;0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Partially Observed Exchangeable Modeling", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10531", "id": "10531", "proceeding": "http://proceedings.mlr.press/v139/li21q.html", "slides": "/media/icml-2021/Slides/10531.pdf", "author_site": "Yang Li, Junier Oliva", "author": "Yang Li; Junier Oliva", "abstract": "Modeling dependencies among features is fundamental for many machine learning tasks. 
Although there are often multiple related instances that may be leveraged to inform conditional dependencies, typical approaches only model conditional dependencies over individual instances. In this work, we propose a novel framework, partially observed exchangeable modeling (POEx) that takes in a set of related partially observed instances and infers the conditional distribution for the unobserved dimensions over multiple elements. Our approach jointly models the intra-instance (among features in a point) and inter-instance (among multiple points in a set) dependencies in data. POEx is a general framework that encompasses many existing tasks such as point cloud expansion and few-shot generation, as well as new tasks like few-shot imputation. Despite its generality, extensive empirical evaluations show that our model achieves state-of-the-art performance across a range of applications.", "bibtex": "@InProceedings{pmlr-v139-li21q,\n title = \t {Partially Observed Exchangeable Modeling},\n author = {Li, Yang and Oliva, Junier},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6460--6470},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21q/li21q.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21q.html},\n abstract = \t {Modeling dependencies among features is fundamental for many machine learning tasks. Although there are often multiple related instances that may be leveraged to inform conditional dependencies, typical approaches only model conditional dependencies over individual instances. In this work, we propose a novel framework, partially observed exchangeable modeling (POEx) that takes in a set of related partially observed instances and infers the conditional distribution for the unobserved dimensions over multiple elements. Our approach jointly models the intra-instance (among features in a point) and inter-instance (among multiple points in a set) dependencies in data. POEx is a general framework that encompasses many existing tasks such as point cloud expansion and few-shot generation, as well as new tasks like few-shot imputation. 
Despite its generality, extensive empirical evaluations show that our model achieves state-of-the-art performance across a range of applications.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21q/li21q.pdf", "supp": "", "pdf_size": 6143917, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5750953591020377368&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, University of North Carolina at Chapel Hill, Chapel Hill, NC, USA; Department of Computer Science, University of North Carolina at Chapel Hill, Chapel Hill, NC, USA", "aff_domain": "cs.unc.edu;cs.unc.edu", "email": "cs.unc.edu;cs.unc.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/li21q.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of North Carolina at Chapel Hill", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.unc.edu", "aff_unique_abbr": "UNC Chapel Hill", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Chapel Hill", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Path Planning using Neural A* Search", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9055", "id": "9055", "proceeding": "http://proceedings.mlr.press/v139/yonetani21a.html", "slides": "/media/icml-2021/Slides/9055.pdf", "author_site": "Ryo Yonetani, Tatsunori Taniai, Mohammadamin Barekatain, Mai Nishimura, Asako Kanezaki", "author": "Ryo Yonetani; Tatsunori Taniai; Mohammadamin Barekatain; Mai Nishimura; Asako Kanezaki", "abstract": "We present Neural A*, a novel data-driven search method for path planning problems. Despite the recent increasing attention to data-driven path planning, machine learning approaches to search-based planning are still challenging due to the discrete nature of search algorithms. In this work, we reformulate a canonical A* search algorithm to be differentiable and couple it with a convolutional encoder to form an end-to-end trainable neural network planner. Neural A* solves a path planning problem by encoding a problem instance to a guidance map and then performing the differentiable A* search with the guidance map. By learning to match the search results with ground-truth paths provided by experts, Neural A* can produce a path consistent with the ground truth accurately and efficiently. Our extensive experiments confirmed that Neural A* outperformed state-of-the-art data-driven planners in terms of the search optimality and efficiency trade-off. Furthermore, Neural A* successfully predicted realistic human trajectories by directly performing search-based planning on natural image inputs.", "bibtex": "@InProceedings{pmlr-v139-yonetani21a,\n title = \t {Path Planning using Neural A* Search},\n author = {Yonetani, Ryo and Taniai, Tatsunori and Barekatain, Mohammadamin and Nishimura, Mai and Kanezaki, Asako},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12029--12039},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yonetani21a/yonetani21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yonetani21a.html},\n abstract = \t {We present Neural A*, a novel data-driven search method for path planning problems. 
Despite the recent increasing attention to data-driven path planning, machine learning approaches to search-based planning are still challenging due to the discrete nature of search algorithms. In this work, we reformulate a canonical A* search algorithm to be differentiable and couple it with a convolutional encoder to form an end-to-end trainable neural network planner. Neural A* solves a path planning problem by encoding a problem instance to a guidance map and then performing the differentiable A* search with the guidance map. By learning to match the search results with ground-truth paths provided by experts, Neural A* can produce a path consistent with the ground truth accurately and efficiently. Our extensive experiments confirmed that Neural A* outperformed state-of-the-art data-driven planners in terms of the search optimality and efficiency trade-off. Furthermore, Neural A* successfully predicted realistic human trajectories by directly performing search-based planning on natural image inputs.}\n}", "pdf": "http://proceedings.mlr.press/v139/yonetani21a/yonetani21a.pdf", "supp": "", "pdf_size": 2096449, "gs_citation": 129, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=997109174991202847&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "OMRON SINIC X, Tokyo, Japan; OMRON SINIC X, Tokyo, Japan + DeepMind, London, UK; OMRON SINIC X, Tokyo, Japan; OMRON SINIC X, Tokyo, Japan; Tokyo Institute of Technology, Tokyo, Japan", "aff_domain": "sinicx.com; ; ; ; ", "email": "sinicx.com; ; ; ; ", "github": "", "project": "https://omron-sinicx.github.io/neural-astar/", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/yonetani21a.html", "aff_unique_index": "0;0+1;0;0;2", "aff_unique_norm": "OMRON Corporation;DeepMind;Tokyo Institute of Technology", "aff_unique_dep": "OMRON SINIC X;;", "aff_unique_url": "https://www.omron.com;https://deepmind.com;https://www.titech.ac.jp", "aff_unique_abbr": "OMRON;DeepMind;Titech", "aff_campus_unique_index": "0;0+1;0;0;0", "aff_campus_unique": "Tokyo;London", "aff_country_unique_index": "0;0+1;0;0;0", "aff_country_unique": "Japan;United Kingdom" }, { "title": "Perceiver: General Perception with Iterative Attention", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10291", "id": "10291", "proceeding": "http://proceedings.mlr.press/v139/jaegle21a.html", "slides": "", "author_site": "Andrew Jaegle, Felix Axel Gimeno Gil, Andy Brock, Oriol Vinyals, Andrew Zisserman, Joao Carreira", "author": "Andrew Jaegle; Felix Gimeno; Andy Brock; Oriol Vinyals; Andrew Zisserman; Joao Carreira", "abstract": "Biological systems understand the world by simultaneously processing high-dimensional inputs from modalities as diverse as vision, audition, touch, proprioception, etc. The perception models used in deep learning on the other hand are designed for individual modalities, often relying on domain-specific assumptions such as the local grid structures exploited by virtually all existing vision models. These priors introduce helpful inductive biases, but also lock models to individual modalities. In this paper we introduce the Perceiver {\u2013} a model that builds upon Transformers and hence makes few architectural assumptions about the relationship between its inputs, but that also scales to hundreds of thousands of inputs, like ConvNets. The model leverages an asymmetric attention mechanism to iteratively distill inputs into a tight latent bottleneck, allowing it to scale to handle very large inputs. 
We show that this architecture is competitive with or outperforms strong, specialized models on classification tasks across various modalities: images, point clouds, audio, video and video+audio. The Perceiver obtains performance comparable to ResNet-50 and ViT on ImageNet without 2D convolutions by directly attending to 50,000 pixels. It is also competitive in all modalities in AudioSet.", "bibtex": "@InProceedings{pmlr-v139-jaegle21a,\n title = \t {Perceiver: General Perception with Iterative Attention},\n author = {Jaegle, Andrew and Gimeno, Felix and Brock, Andy and Vinyals, Oriol and Zisserman, Andrew and Carreira, Joao},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4651--4664},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jaegle21a/jaegle21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jaegle21a.html},\n abstract = \t {Biological systems understand the world by simultaneously processing high-dimensional inputs from modalities as diverse as vision, audition, touch, proprioception, etc. The perception models used in deep learning on the other hand are designed for individual modalities, often relying on domain-specific assumptions such as the local grid structures exploited by virtually all existing vision models. These priors introduce helpful inductive biases, but also lock models to individual modalities. In this paper we introduce the Perceiver {\u2013} a model that builds upon Transformers and hence makes few architectural assumptions about the relationship between its inputs, but that also scales to hundreds of thousands of inputs, like ConvNets. The model leverages an asymmetric attention mechanism to iteratively distill inputs into a tight latent bottleneck, allowing it to scale to handle very large inputs. We show that this architecture is competitive with or outperforms strong, specialized models on classification tasks across various modalities: images, point clouds, audio, video and video+audio. The Perceiver obtains performance comparable to ResNet-50 and ViT on ImageNet without 2D convolutions by directly attending to 50,000 pixels. 
It is also competitive in all modalities in AudioSet.}\n}", "pdf": "http://proceedings.mlr.press/v139/jaegle21a/jaegle21a.pdf", "supp": "", "pdf_size": 2577803, "gs_citation": 1234, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8704237515510088771&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "DeepMind \u2013 London, UK; DeepMind \u2013 London, UK; DeepMind \u2013 London, UK; DeepMind \u2013 London, UK; DeepMind \u2013 London, UK; DeepMind \u2013 London, UK", "aff_domain": "deepmind.com; ; ; ; ; ", "email": "deepmind.com; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/jaegle21a.html", "aff_unique_index": "0;0;0;0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "0;0;0;0;0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Permutation Weighting", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9133", "id": "9133", "proceeding": "http://proceedings.mlr.press/v139/arbour21a.html", "slides": "", "author_site": "David Arbour, Drew Dimmery, Arjun Sondhi", "author": "David Arbour; Drew Dimmery; Arjun Sondhi", "abstract": "A commonly applied approach for estimating causal effects from observational data is to apply weights which render treatments independent of observed pre-treatment covariates. Recently emphasis has been placed on deriving balancing weights which explicitly target this independence condition. In this work we introduce permutation weighting, a method for estimating balancing weights using a standard binary classifier (regardless of cardinality of treatment). A large class of probabilistic classifiers may be used in this method; the choice of loss for the classifier implies the particular definition of balance. We bound bias and variance in terms of the excess risk of the classifier, show that these disappear asymptotically, and demonstrate that our classification problem directly minimizes imbalance. Additionally, hyper-parameter tuning and model selection can be performed with standard cross-validation methods. Empirical evaluations indicate that permutation weighting provides favorable performance in comparison to existing methods.", "bibtex": "@InProceedings{pmlr-v139-arbour21a,\n title = \t {Permutation Weighting},\n author = {Arbour, David and Dimmery, Drew and Sondhi, Arjun},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {331--341},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/arbour21a/arbour21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/arbour21a.html},\n abstract = \t {A commonly applied approach for estimating causal effects from observational data is to apply weights which render treatments independent of observed pre-treatment covariates. Recently emphasis has been placed on deriving balancing weights which explicitly target this independence condition. In this work we introduce permutation weighting, a method for estimating balancing weights using a standard binary classifier (regardless of cardinality of treatment). 
A large class of probabilistic classifiers may be used in this method; the choice of loss for the classifier implies the particular definition of balance. We bound bias and variance in terms of the excess risk of the classifier, show that these disappear asymptotically, and demonstrate that our classification problem directly minimizes imbalance. Additionally, hyper-parameter tuning and model selection can be performed with standard cross-validation methods. Empirical evaluations indicate that permutation weighting provides favorable performance in comparison to existing methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/arbour21a/arbour21a.pdf", "supp": "", "pdf_size": 671693, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15601194996066528241&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Adobe Research, San Jose, CA, USA+Work carried out while at Facebook Core Data Science, Menlo Park, CA, USA+Forschungsverbund Data Science, University of Vienna, Vienna, AT; Adobe Research, San Jose, CA, USA+Work carried out while at Facebook Core Data Science, Menlo Park, CA, USA+Forschungsverbund Data Science, University of Vienna, Vienna, AT; Flatiron Health, New York, NY, USA", "aff_domain": "gmail.com; ; ", "email": "gmail.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/arbour21a.html", "aff_unique_index": "0+1+2;0+1+2;3", "aff_unique_norm": "Adobe;Meta;University of Vienna;Flatiron Health", "aff_unique_dep": "Adobe Research;Core Data Science;Forschungsverbund Data Science;", "aff_unique_url": "https://research.adobe.com;https://www.facebook.com;https://www.univie.ac.at;https://www.flatiron.com", "aff_unique_abbr": "Adobe;FB;Uni Vienna;", "aff_campus_unique_index": "0+1+2;0+1+2;3", "aff_campus_unique": "San Jose;Menlo Park;Vienna;New York", "aff_country_unique_index": "0+0+1;0+0+1;0", "aff_country_unique": "United States;Austria" }, { "title": "Personalized Federated Learning using Hypernetworks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10453", "id": "10453", "proceeding": "http://proceedings.mlr.press/v139/shamsian21a.html", "slides": "", "author_site": "Aviv Shamsian, Aviv Navon, Ethan Fetaya, Gal Chechik", "author": "Aviv Shamsian; Aviv Navon; Ethan Fetaya; Gal Chechik", "abstract": "Personalized federated learning is tasked with training machine learning models for multiple clients, each with its own data distribution. The goal is to train personalized models collaboratively while accounting for data disparities across clients and reducing communication costs. We propose a novel approach to this problem using hypernetworks, termed pFedHN for personalized Federated HyperNetworks. In this approach, a central hypernetwork model is trained to generate a set of models, one model for each client. This architecture provides effective parameter sharing across clients while maintaining the capacity to generate unique and diverse personal models. Furthermore, since hypernetwork parameters are never transmitted, this approach decouples the communication cost from the trainable model size. We test pFedHN empirically in several personalized federated learning challenges and find that it outperforms previous methods. 
Finally, since hypernetworks share information across clients, we show that pFedHN can generalize better to new clients whose distributions differ from any client observed during training.", "bibtex": "@InProceedings{pmlr-v139-shamsian21a,\n title = \t {Personalized Federated Learning using Hypernetworks},\n author = {Shamsian, Aviv and Navon, Aviv and Fetaya, Ethan and Chechik, Gal},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9489--9502},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/shamsian21a/shamsian21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/shamsian21a.html},\n abstract = \t {Personalized federated learning is tasked with training machine learning models for multiple clients, each with its own data distribution. The goal is to train personalized models collaboratively while accounting for data disparities across clients and reducing communication costs. We propose a novel approach to this problem using hypernetworks, termed pFedHN for personalized Federated HyperNetworks. In this approach, a central hypernetwork model is trained to generate a set of models, one model for each client. This architecture provides effective parameter sharing across clients while maintaining the capacity to generate unique and diverse personal models. Furthermore, since hypernetwork parameters are never transmitted, this approach decouples the communication cost from the trainable model size. We test pFedHN empirically in several personalized federated learning challenges and find that it outperforms previous methods. 
Finally, since hypernetworks share information across clients, we show that pFedHN can generalize better to new clients whose distributions differ from any client observed during training.}\n}", "pdf": "http://proceedings.mlr.press/v139/shamsian21a/shamsian21a.pdf", "supp": "", "pdf_size": 2111777, "gs_citation": 421, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9364037892005853502&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Bar-Ilan University, Ramat Gan, Israel; Bar-Ilan University, Ramat Gan, Israel; Bar-Ilan University, Ramat Gan, Israel; Bar-Ilan University, Ramat Gan, Israel + Nvidia, Tel-Aviv, Israel", "aff_domain": "live.biu.ac.il;biu.ac.il; ; ", "email": "live.biu.ac.il;biu.ac.il; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/shamsian21a.html", "aff_unique_index": "0;0;0;0+1", "aff_unique_norm": "Bar-Ilan University;NVIDIA", "aff_unique_dep": ";Nvidia", "aff_unique_url": "https://www.biu.ac.il;https://www.nvidia.com", "aff_unique_abbr": "BIU;NVDA", "aff_campus_unique_index": "0;0;0;0+1", "aff_campus_unique": "Ramat Gan;Tel-Aviv", "aff_country_unique_index": "0;0;0;0+0", "aff_country_unique": "Israel" }, { "title": "Phase Transitions, Distance Functions, and Implicit Neural Representations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10735", "id": "10735", "proceeding": "http://proceedings.mlr.press/v139/lipman21a.html", "slides": "", "author": "Yaron Lipman", "abstract": "Representing surfaces as zero level sets of neural networks recently emerged as a powerful modeling paradigm, named Implicit Neural Representations (INRs), serving numerous downstream applications in geometric deep learning and 3D vision. Training INRs previously required choosing between occupancy and distance function representation and different losses with unknown limit behavior and/or bias. In this paper we draw inspiration from the theory of phase transitions of fluids and suggest a loss for training INRs that learns a density function that converges to a proper occupancy function, while its log transform converges to a distance function. Furthermore, we analyze the limit minimizer of this loss showing it satisfies the reconstruction constraints and has minimal surface perimeter, a desirable inductive bias for surface reconstruction. Training INRs with this new loss leads to state-of-the-art reconstructions on a standard benchmark.", "bibtex": "@InProceedings{pmlr-v139-lipman21a,\n title = \t {Phase Transitions, Distance Functions, and Implicit Neural Representations},\n author = {Lipman, Yaron},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6702--6712},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lipman21a/lipman21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lipman21a.html},\n abstract = \t {Representing surfaces as zero level sets of neural networks recently emerged as a powerful modeling paradigm, named Implicit Neural Representations (INRs), serving numerous downstream applications in geometric deep learning and 3D vision. Training INRs previously required choosing between occupancy and distance function representation and different losses with unknown limit behavior and/or bias. 
In this paper we draw inspiration from the theory of phase transitions of fluids and suggest a loss for training INRs that learns a density function that converges to a proper occupancy function, while its log transform converges to a distance function. Furthermore, we analyze the limit minimizer of this loss showing it satisfies the reconstruction constraints and has minimal surface perimeter, a desirable inductive bias for surface reconstruction. Training INRs with this new loss leads to state-of-the-art reconstructions on a standard benchmark.}\n}", "pdf": "http://proceedings.mlr.press/v139/lipman21a/lipman21a.pdf", "supp": "", "pdf_size": 9521381, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6000374662325804671&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "", "aff_domain": "", "email": "", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v139/lipman21a.html" }, { "title": "Phasic Policy Gradient", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8475", "id": "8475", "proceeding": "http://proceedings.mlr.press/v139/cobbe21a.html", "slides": "", "author_site": "Karl Cobbe, Jacob Hilton, Oleg Klimov, John Schulman", "author": "Karl W Cobbe; Jacob Hilton; Oleg Klimov; John Schulman", "abstract": "We introduce Phasic Policy Gradient (PPG), a reinforcement learning framework which modifies traditional on-policy actor-critic methods by separating policy and value function training into distinct phases. In prior methods, one must choose between using a shared network or separate networks to represent the policy and value function. Using separate networks avoids interference between objectives, while using a shared network allows useful features to be shared. PPG is able to achieve the best of both worlds by splitting optimization into two phases, one that advances training and one that distills features. PPG also enables the value function to be more aggressively optimized with a higher level of sample reuse. Compared to PPO, we find that PPG significantly improves sample efficiency on the challenging Procgen Benchmark.", "bibtex": "@InProceedings{pmlr-v139-cobbe21a,\n title = \t {Phasic Policy Gradient},\n author = {Cobbe, Karl W and Hilton, Jacob and Klimov, Oleg and Schulman, John},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2020--2027},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cobbe21a/cobbe21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/cobbe21a.html},\n abstract = \t {We introduce Phasic Policy Gradient (PPG), a reinforcement learning framework which modifies traditional on-policy actor-critic methods by separating policy and value function training into distinct phases. In prior methods, one must choose between using a shared network or separate networks to represent the policy and value function. Using separate networks avoids interference between objectives, while using a shared network allows useful features to be shared. PPG is able to achieve the best of both worlds by splitting optimization into two phases, one that advances training and one that distills features. PPG also enables the value function to be more aggressively optimized with a higher level of sample reuse. 
Compared to PPO, we find that PPG significantly improves sample efficiency on the challenging Procgen Benchmark.}\n}", "pdf": "http://proceedings.mlr.press/v139/cobbe21a/cobbe21a.pdf", "supp": "", "pdf_size": 1273099, "gs_citation": 214, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10786895332065637304&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "OpenAI; OpenAI; OpenAI; OpenAI", "aff_domain": "openai.com; ; ; ", "email": "openai.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/cobbe21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "OpenAI", "aff_unique_dep": "", "aff_unique_url": "https://openai.com", "aff_unique_abbr": "OpenAI", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "PipeTransformer: Automated Elastic Pipelining for Distributed Training of Large-scale Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9595", "id": "9595", "proceeding": "http://proceedings.mlr.press/v139/he21a.html", "slides": "", "author_site": "Chaoyang He, Shen Li, Mahdi Soltanolkotabi, Salman Avestimehr", "author": "Chaoyang He; Shen Li; Mahdi Soltanolkotabi; Salman Avestimehr", "abstract": "The size of Transformer models is growing at an unprecedented rate. It has taken less than one year to reach trillion-level parameters since the release of GPT-3 (175B). Training such models requires both substantial engineering efforts and enormous computing resources, which are luxuries most research teams cannot afford. In this paper, we propose PipeTransformer, which leverages automated elastic pipelining for efficient distributed training of Transformer models. In PipeTransformer, we design an adaptive on the fly freeze algorithm that can identify and freeze some layers gradually during training, and an elastic pipelining system that can dynamically allocate resources to train the remaining active layers. More specifically, PipeTransformer automatically excludes frozen layers from the pipeline, packs active layers into fewer GPUs, and forks more replicas to increase data-parallel width. We evaluate PipeTransformer using Vision Transformer (ViT) on ImageNet and BERT on SQuAD and GLUE datasets. Our results show that compared to the state-of-the-art baseline, PipeTransformer attains up to 2.83-fold speedup without losing accuracy. We also provide various performance analyses for a more comprehensive understanding of our algorithmic and system-wise design. Finally, we have modularized our training system with flexible APIs and made the source code publicly available at https://DistML.ai.", "bibtex": "@InProceedings{pmlr-v139-he21a,\n title = \t {PipeTransformer: Automated Elastic Pipelining for Distributed Training of Large-scale Models},\n author = {He, Chaoyang and Li, Shen and Soltanolkotabi, Mahdi and Avestimehr, Salman},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4150--4159},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/he21a/he21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/he21a.html},\n abstract = \t {The size of Transformer models is growing at an unprecedented rate. 
It has taken less than one year to reach trillion-level parameters since the release of GPT-3 (175B). Training such models requires both substantial engineering efforts and enormous computing resources, which are luxuries most research teams cannot afford. In this paper, we propose PipeTransformer, which leverages automated elastic pipelining for efficient distributed training of Transformer models. In PipeTransformer, we design an adaptive on the fly freeze algorithm that can identify and freeze some layers gradually during training, and an elastic pipelining system that can dynamically allocate resources to train the remaining active layers. More specifically, PipeTransformer automatically excludes frozen layers from the pipeline, packs active layers into fewer GPUs, and forks more replicas to increase data-parallel width. We evaluate PipeTransformer using Vision Transformer (ViT) on ImageNet and BERT on SQuAD and GLUE datasets. Our results show that compared to the state-of-the-art baseline, PipeTransformer attains up to 2.83-fold speedup without losing accuracy. We also provide various performance analyses for a more comprehensive understanding of our algorithmic and system-wise design. Finally, we have modularized our training system with flexible APIs and made the source code publicly available at https://DistML.ai.}\n}", "pdf": "http://proceedings.mlr.press/v139/he21a/he21a.pdf", "supp": "", "pdf_size": 3435546, "gs_citation": 36, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3163070023450247392&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "University of Southern California; Facebook AI Research; University of Southern California; University of Southern California", "aff_domain": "usc.edu; ; ; ", "email": "usc.edu; ; ; ", "github": "", "project": "https://DistML.ai", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/he21a.html", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "University of Southern California;Meta", "aff_unique_dep": ";Facebook AI Research", "aff_unique_url": "https://www.usc.edu;https://research.facebook.com", "aff_unique_abbr": "USC;FAIR", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Los Angeles;", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "PixelTransformer: Sample Conditioned Signal Generation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8635", "id": "8635", "proceeding": "http://proceedings.mlr.press/v139/tulsiani21a.html", "slides": "/media/icml-2021/Slides/8635.pdf", "author_site": "Shubham Tulsiani, Abhinav Gupta", "author": "Shubham Tulsiani; Abhinav Gupta", "abstract": "We propose a generative model that can infer a distribution for the underlying spatial signal conditioned on sparse samples e.g. plausible images given a few observed pixels. In contrast to sequential autoregressive generative models, our model allows conditioning on arbitrary samples and can answer distributional queries for any location. We empirically validate our approach across three image datasets and show that we learn to generate diverse and meaningful samples, with the distribution variance reducing given more observed pixels. We also show that our approach is applicable beyond images and can allow generating other types of spatial outputs e.g. 
polynomials, 3D shapes, and videos.", "bibtex": "@InProceedings{pmlr-v139-tulsiani21a,\n title = \t {PixelTransformer: Sample Conditioned Signal Generation},\n author = {Tulsiani, Shubham and Gupta, Abhinav},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10455--10464},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tulsiani21a/tulsiani21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/tulsiani21a.html},\n abstract = \t {We propose a generative model that can infer a distribution for the underlying spatial signal conditioned on sparse samples e.g. plausible images given a few observed pixels. In contrast to sequential autoregressive generative models, our model allows conditioning on arbitrary samples and can answer distributional queries for any location. We empirically validate our approach across three image datasets and show that we learn to generate diverse and meaningful samples, with the distribution variance reducing given more observed pixels. We also show that our approach is applicable beyond images and can allow generating other types of spatial outputs e.g. polynomials, 3D shapes, and videos.}\n}", "pdf": "http://proceedings.mlr.press/v139/tulsiani21a/tulsiani21a.pdf", "supp": "", "pdf_size": 8513038, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15694992814665504118&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Facebook AI Research; Facebook AI Research + Carnegie Mellon University", "aff_domain": "fb.com; ", "email": "fb.com; ", "github": "https://shubhtuls.github.io/PixelTransformer/", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/tulsiani21a.html", "aff_unique_index": "0;0+1", "aff_unique_norm": "Meta;Carnegie Mellon University", "aff_unique_dep": "Facebook AI Research;", "aff_unique_url": "https://research.facebook.com;https://www.cmu.edu", "aff_unique_abbr": "FAIR;CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0+0", "aff_country_unique": "United States" }, { "title": "Pointwise Binary Classification with Pairwise Confidence Comparisons", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9839", "id": "9839", "proceeding": "http://proceedings.mlr.press/v139/feng21d.html", "slides": "", "author_site": "Lei Feng, Senlin Shu, Nan Lu, Bo Han, Miao Xu, Gang Niu, Bo An, Masashi Sugiyama", "author": "Lei Feng; Senlin Shu; Nan Lu; Bo Han; Miao Xu; Gang Niu; Bo An; Masashi Sugiyama", "abstract": "To alleviate the data requirement for training effective binary classifiers in binary classification, many weakly supervised learning settings have been proposed. Among them, some consider using pairwise but not pointwise labels, when pointwise labels are not accessible due to privacy, confidentiality, or security reasons. However, as a pairwise label denotes whether or not two data points share a pointwise label, it cannot be easily collected if either point is equally likely to be positive or negative. Thus, in this paper, we propose a novel setting called pairwise comparison (Pcomp) classification, where we have only pairs of unlabeled data that we know one is more likely to be positive than the other. 
Firstly, we give a Pcomp data generation process, derive an unbiased risk estimator (URE) with theoretical guarantee, and further improve URE using correction functions. Secondly, we link Pcomp classification to noisy-label learning to develop a progressive URE and improve it by imposing consistency regularization. Finally, we demonstrate by experiments the effectiveness of our methods, which suggests Pcomp is a valuable and practically useful type of pairwise supervision besides the pairwise label.", "bibtex": "@InProceedings{pmlr-v139-feng21d,\n title = \t {Pointwise Binary Classification with Pairwise Confidence Comparisons},\n author = {Feng, Lei and Shu, Senlin and Lu, Nan and Han, Bo and Xu, Miao and Niu, Gang and An, Bo and Sugiyama, Masashi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3252--3262},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/feng21d/feng21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/feng21d.html},\n abstract = \t {To alleviate the data requirement for training effective binary classifiers in binary classification, many weakly supervised learning settings have been proposed. Among them, some consider using pairwise but not pointwise labels, when pointwise labels are not accessible due to privacy, confidentiality, or security reasons. However, as a pairwise label denotes whether or not two data points share a pointwise label, it cannot be easily collected if either point is equally likely to be positive or negative. Thus, in this paper, we propose a novel setting called pairwise comparison (Pcomp) classification, where we have only pairs of unlabeled data that we know one is more likely to be positive than the other. Firstly, we give a Pcomp data generation process, derive an unbiased risk estimator (URE) with theoretical guarantee, and further improve URE using correction functions. Secondly, we link Pcomp classification to noisy-label learning to develop a progressive URE and improve it by imposing consistency regularization. Finally, we demonstrate by experiments the effectiveness of our methods, which suggests Pcomp is a valuable and practically useful type of pairwise supervision besides the pairwise label.}\n}", "pdf": "http://proceedings.mlr.press/v139/feng21d/feng21d.pdf", "supp": "", "pdf_size": 3159964, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6083909511444479074&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": ";;;;;;;", "aff_domain": ";;;;;;;", "email": ";;;;;;;", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/feng21d.html" }, { "title": "Poisson-Randomised DirBN: Large Mutation is Needed in Dirichlet Belief Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9425", "id": "9425", "proceeding": "http://proceedings.mlr.press/v139/fan21a.html", "slides": "/media/icml-2021/Slides/9425.pdf", "author_site": "Xuhui Fan, Bin Li, Yaqiong Li, Scott SIsson", "author": "Xuhui Fan; Bin Li; Yaqiong Li; Scott A. Sisson", "abstract": "The Dirichlet Belief Network\u00a0(DirBN) was recently proposed as a promising deep generative model to learn interpretable deep latent distributions for objects. 
However, its current representation capability is limited since its latent distributions across different layers is prone to form similar patterns and can thus hardly use multi-layer structure to form flexible distributions. In this work, we propose Poisson-randomised Dirichlet Belief Networks (Pois-DirBN), which allows large mutations for the latent distributions across layers to enlarge the representation capability. Based on our key idea of inserting Poisson random variables in the layer-wise connection, Pois-DirBN first introduces a component-wise propagation mechanism to enable latent distributions to have large variations across different layers. Then, we develop a layer-wise Gibbs sampling algorithm to infer the latent distributions, leading to a larger number of effective layers compared to DirBN. In addition, we integrate out latent distributions and form a multi-stochastic deep integer network, which provides an alternative view on Pois-DirBN. We apply Pois-DirBN to relational modelling and validate its effectiveness through improved link prediction performance and more interpretable latent distribution visualisations. The code can be downloaded at https://github.com/xuhuifan/Pois_DirBN.", "bibtex": "@InProceedings{pmlr-v139-fan21a,\n title = \t {Poisson-Randomised DirBN: Large Mutation is Needed in Dirichlet Belief Networks},\n author = {Fan, Xuhui and Li, Bin and Li, Yaqiong and Sisson, Scott A.},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3068--3077},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fan21a/fan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/fan21a.html},\n abstract = \t {The Dirichlet Belief Network\u00a0(DirBN) was recently proposed as a promising deep generative model to learn interpretable deep latent distributions for objects. However, its current representation capability is limited since its latent distributions across different layers is prone to form similar patterns and can thus hardly use multi-layer structure to form flexible distributions. In this work, we propose Poisson-randomised Dirichlet Belief Networks (Pois-DirBN), which allows large mutations for the latent distributions across layers to enlarge the representation capability. Based on our key idea of inserting Poisson random variables in the layer-wise connection, Pois-DirBN first introduces a component-wise propagation mechanism to enable latent distributions to have large variations across different layers. Then, we develop a layer-wise Gibbs sampling algorithm to infer the latent distributions, leading to a larger number of effective layers compared to DirBN. In addition, we integrate out latent distributions and form a multi-stochastic deep integer network, which provides an alternative view on Pois-DirBN. We apply Pois-DirBN to relational modelling and validate its effectiveness through improved link prediction performance and more interpretable latent distribution visualisations. 
The code can be downloaded at https://github.com/xuhuifan/Pois_DirBN.}\n}", "pdf": "http://proceedings.mlr.press/v139/fan21a/fan21a.pdf", "supp": "", "pdf_size": 756105, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1373383609635211559&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "UNSW Data Science Hub, and School of Mathematics and Statistics, University of New South Wales; Shanghai Key Laboratory of IIP, School of Computer Science, Fudan University; Australian Artificial Intelligence Institute, University of Technology, Sydney; UNSW Data Science Hub, and School of Mathematics and Statistics, University of New South Wales", "aff_domain": "unsw.edu.au;fudan.edu.cn;uts.edu.au;unsw.edu.au", "email": "unsw.edu.au;fudan.edu.cn;uts.edu.au;unsw.edu.au", "github": "https://github.com/xuhuifan/Pois_DirBN", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/fan21a.html", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "University of New South Wales;Fudan University;University of Technology Sydney", "aff_unique_dep": "School of Mathematics and Statistics;School of Computer Science;Australian Artificial Intelligence Institute", "aff_unique_url": "https://www.unsw.edu.au;https://www.fudan.edu.cn;https://www.uts.edu.au", "aff_unique_abbr": "UNSW;Fudan;UTS", "aff_campus_unique_index": "1;2", "aff_campus_unique": ";Shanghai;Sydney", "aff_country_unique_index": "0;1;0;0", "aff_country_unique": "Australia;China" }, { "title": "Policy Analysis using Synthetic Controls in Continuous-Time", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8675", "id": "8675", "proceeding": "http://proceedings.mlr.press/v139/bellot21a.html", "slides": "", "author_site": "Alexis Bellot, Mihaela van der Schaar", "author": "Alexis Bellot; Mihaela van der Schaar", "abstract": "Counterfactual estimation using synthetic controls is one of the most successful recent methodological developments in causal inference. Despite its popularity, the current description only considers time series aligned across units and synthetic controls expressed as linear combinations of observed control units. We propose a continuous-time alternative that models the latent counterfactual path explicitly using the formalism of controlled differential equations. This model is directly applicable to the general setting of irregularly-aligned multivariate time series and may be optimized in rich function spaces \u2013 thereby improving on some limitations of existing approaches.", "bibtex": "@InProceedings{pmlr-v139-bellot21a,\n title = \t {Policy Analysis using Synthetic Controls in Continuous-Time},\n author = {Bellot, Alexis and van der Schaar, Mihaela},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {759--768},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bellot21a/bellot21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bellot21a.html},\n abstract = \t {Counterfactual estimation using synthetic controls is one of the most successful recent methodological developments in causal inference. Despite its popularity, the current description only considers time series aligned across units and synthetic controls expressed as linear combinations of observed control units. 
We propose a continuous-time alternative that models the latent counterfactual path explicitly using the formalism of controlled differential equations. This model is directly applicable to the general setting of irregularly-aligned multivariate time series and may be optimized in rich function spaces \u2013 thereby improving on some limitations of existing approaches.}\n}", "pdf": "http://proceedings.mlr.press/v139/bellot21a/bellot21a.pdf", "supp": "", "pdf_size": 704090, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2140002842575594537&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "University of Cambridge, UK+Alan Turing Institute, UK+University of California Los Angeles, USA; University of Cambridge, UK+Alan Turing Institute, UK+University of California Los Angeles, USA", "aff_domain": "hotmail.com; ", "email": "hotmail.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/bellot21a.html", "aff_unique_index": "0+1+2;0+1+2", "aff_unique_norm": "University of Cambridge;Alan Turing Institute;University of California, Los Angeles", "aff_unique_dep": ";;", "aff_unique_url": "https://www.cam.ac.uk;https://www.turing.ac.uk;https://www.ucla.edu", "aff_unique_abbr": "Cambridge;ATI;UCLA", "aff_campus_unique_index": "0+2;0+2", "aff_campus_unique": "Cambridge;;Los Angeles", "aff_country_unique_index": "0+0+1;0+0+1", "aff_country_unique": "United Kingdom;United States" }, { "title": "Policy Caches with Successor Features", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9347", "id": "9347", "proceeding": "http://proceedings.mlr.press/v139/nemecek21a.html", "slides": "", "author_site": "Mark Nemecek, Ron Parr", "author": "Mark Nemecek; Ronald Parr", "abstract": "Transfer in reinforcement learning is based on the idea that it is possible to use what is learned in one task to improve the learning process in another task. For transfer between tasks which share transition dynamics but differ in reward function, successor features have been shown to be a useful representation which allows for efficient computation of action-value functions for previously-learned policies in new tasks. These functions induce policies in the new tasks, so an agent may not need to learn a new policy for each new task it encounters, especially if it is allowed some amount of suboptimality in those tasks. We present new bounds for the performance of optimal policies in a new task, as well as an approach to use these bounds to decide, when presented with a new task, whether to use cached policies or learn a new policy.", "bibtex": "@InProceedings{pmlr-v139-nemecek21a,\n title = \t {Policy Caches with Successor Features},\n author = {Nemecek, Mark and Parr, Ronald},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8025--8033},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nemecek21a/nemecek21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/nemecek21a.html},\n abstract = \t {Transfer in reinforcement learning is based on the idea that it is possible to use what is learned in one task to improve the learning process in another task. 
For transfer between tasks which share transition dynamics but differ in reward function, successor features have been shown to be a useful representation which allows for efficient computation of action-value functions for previously-learned policies in new tasks. These functions induce policies in the new tasks, so an agent may not need to learn a new policy for each new task it encounters, especially if it is allowed some amount of suboptimality in those tasks. We present new bounds for the performance of optimal policies in a new task, as well as an approach to use these bounds to decide, when presented with a new task, whether to use cached policies or learn a new policy.}\n}", "pdf": "http://proceedings.mlr.press/v139/nemecek21a/nemecek21a.pdf", "supp": "", "pdf_size": 5504059, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1241148801969465435&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science, Duke University, Durham, North Carolina, USA; Department of Computer Science, Duke University, Durham, North Carolina, USA", "aff_domain": "cs.duke.edu; ", "email": "cs.duke.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/nemecek21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Duke University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.duke.edu", "aff_unique_abbr": "Duke", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Durham", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Policy Gradient Bayesian Robust Optimization for Imitation Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9559", "id": "9559", "proceeding": "http://proceedings.mlr.press/v139/javed21a.html", "slides": "", "author_site": "Zaynah Javed, Daniel Brown, Satvik Sharma, Jerry Zhu, Ashwin Balakrishna, Marek Petrik, Anca Dragan, Ken Goldberg", "author": "Zaynah Javed; Daniel S Brown; Satvik Sharma; Jerry Zhu; Ashwin Balakrishna; Marek Petrik; Anca Dragan; Ken Goldberg", "abstract": "The difficulty in specifying rewards for many real-world problems has led to an increased focus on learning rewards from human feedback, such as demonstrations. However, there are often many different reward functions that explain the human feedback, leaving agents with uncertainty over what the true reward function is. While most policy optimization approaches handle this uncertainty by optimizing for expected performance, many applications demand risk-averse behavior. We derive a novel policy gradient-style robust optimization approach, PG-BROIL, that optimizes a soft-robust objective that balances expected performance and risk. To the best of our knowledge, PG-BROIL is the first policy optimization algorithm robust to a distribution of reward hypotheses which can scale to continuous MDPs. 
Results suggest that PG-BROIL can produce a family of behaviors ranging from risk-neutral to risk-averse and outperforms state-of-the-art imitation learning algorithms when learning from ambiguous demonstrations by hedging against uncertainty, rather than seeking to uniquely identify the demonstrator\u2019s reward function.", "bibtex": "@InProceedings{pmlr-v139-javed21a,\n title = \t {Policy Gradient Bayesian Robust Optimization for Imitation Learning},\n author = {Javed, Zaynah and Brown, Daniel S and Sharma, Satvik and Zhu, Jerry and Balakrishna, Ashwin and Petrik, Marek and Dragan, Anca and Goldberg, Ken},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4785--4796},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/javed21a/javed21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/javed21a.html},\n abstract = \t {The difficulty in specifying rewards for many real-world problems has led to an increased focus on learning rewards from human feedback, such as demonstrations. However, there are often many different reward functions that explain the human feedback, leaving agents with uncertainty over what the true reward function is. While most policy optimization approaches handle this uncertainty by optimizing for expected performance, many applications demand risk-averse behavior. We derive a novel policy gradient-style robust optimization approach, PG-BROIL, that optimizes a soft-robust objective that balances expected performance and risk. To the best of our knowledge, PG-BROIL is the first policy optimization algorithm robust to a distribution of reward hypotheses which can scale to continuous MDPs. 
Results suggest that PG-BROIL can produce a family of behaviors ranging from risk-neutral to risk-averse and outperforms state-of-the-art imitation learning algorithms when learning from ambiguous demonstrations by hedging against uncertainty, rather than seeking to uniquely identify the demonstrator\u2019s reward function.}\n}", "pdf": "http://proceedings.mlr.press/v139/javed21a/javed21a.pdf", "supp": "", "pdf_size": 895799, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9852878961201509946&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "EECS Department, University of California, Berkeley; EECS Department, University of California, Berkeley; EECS Department, University of California, Berkeley; EECS Department, University of California, Berkeley; EECS Department, University of California, Berkeley; CS Department, University of New Hampshire; EECS Department, University of California, Berkeley; EECS Department, University of California, Berkeley", "aff_domain": "berkeley.edu;berkeley.edu;berkeley.edu;berkeley.edu;berkeley.edu;unh.edu;berkeley.edu;berkeley.edu", "email": "berkeley.edu;berkeley.edu;berkeley.edu;berkeley.edu;berkeley.edu;unh.edu;berkeley.edu;berkeley.edu", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/javed21a.html", "aff_unique_index": "0;0;0;0;0;1;0;0", "aff_unique_norm": "University of California, Berkeley;University of New Hampshire", "aff_unique_dep": "EECS Department;Computer Science Department", "aff_unique_url": "https://www.berkeley.edu;https://www.unh.edu", "aff_unique_abbr": "UC Berkeley;UNH", "aff_campus_unique_index": "0;0;0;0;0;0;0", "aff_campus_unique": "Berkeley;", "aff_country_unique_index": "0;0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Policy Information Capacity: Information-Theoretic Measure for Task Complexity in Deep Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8571", "id": "8571", "proceeding": "http://proceedings.mlr.press/v139/furuta21a.html", "slides": "", "author_site": "Hiroki Furuta, Tatsuya Matsushima, Tadashi Kozuno, Yutaka Matsuo, Sergey Levine, Ofir Nachum, Shixiang Gu", "author": "Hiroki Furuta; Tatsuya Matsushima; Tadashi Kozuno; Yutaka Matsuo; Sergey Levine; Ofir Nachum; Shixiang Shane Gu", "abstract": "Progress in deep reinforcement learning (RL) research is largely enabled by benchmark task environments. However, analyzing the nature of those environments is often overlooked. In particular, we still do not have agreeable ways to measure the difficulty or solvability of a task, given that each has fundamentally different actions, observations, dynamics, rewards, and can be tackled with diverse RL algorithms. In this work, we propose policy information capacity (PIC) \u2013 the mutual information between policy parameters and episodic return \u2013 and policy-optimal information capacity (POIC) \u2013 between policy parameters and episodic optimality \u2013 as two environment-agnostic, algorithm-agnostic quantitative metrics for task difficulty. Evaluating our metrics across toy environments as well as continuous control benchmark tasks from OpenAI Gym and DeepMind Control Suite, we empirically demonstrate that these information-theoretic metrics have higher correlations with normalized task solvability scores than a variety of alternatives. 
Lastly, we show that these metrics can also be used for fast and compute-efficient optimizations of key design parameters such as reward shaping, policy architectures, and MDP properties for better solvability by RL algorithms without ever running full RL experiments.", "bibtex": "@InProceedings{pmlr-v139-furuta21a,\n title = \t {Policy Information Capacity: Information-Theoretic Measure for Task Complexity in Deep Reinforcement Learning},\n author = {Furuta, Hiroki and Matsushima, Tatsuya and Kozuno, Tadashi and Matsuo, Yutaka and Levine, Sergey and Nachum, Ofir and Gu, Shixiang Shane},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3541--3552},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/furuta21a/furuta21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/furuta21a.html},\n abstract = \t {Progress in deep reinforcement learning (RL) research is largely enabled by benchmark task environments. However, analyzing the nature of those environments is often overlooked. In particular, we still do not have agreeable ways to measure the difficulty or solvability of a task, given that each has fundamentally different actions, observations, dynamics, rewards, and can be tackled with diverse RL algorithms. In this work, we propose policy information capacity (PIC) \u2013 the mutual information between policy parameters and episodic return \u2013 and policy-optimal information capacity (POIC) \u2013 between policy parameters and episodic optimality \u2013 as two environment-agnostic, algorithm-agnostic quantitative metrics for task difficulty. Evaluating our metrics across toy environments as well as continuous control benchmark tasks from OpenAI Gym and DeepMind Control Suite, we empirically demonstrate that these information-theoretic metrics have higher correlations with normalized task solvability scores than a variety of alternatives. 
Lastly, we show that these metrics can also be used for fast and compute-efficient optimizations of key design parameters such as reward shaping, policy architectures, and MDP properties for better solvability by RL algorithms without ever running full RL experiments.}\n}", "pdf": "http://proceedings.mlr.press/v139/furuta21a/furuta21a.pdf", "supp": "", "pdf_size": 2668745, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4831959598163320466&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "The University of Tokyo, Tokyo, Japan; The University of Tokyo, Tokyo, Japan; University of Alberta, Edmonton, Canada; The University of Tokyo, Tokyo, Japan; Google Research, Mountain View, USA; Google Research, Mountain View, USA; Google Research, Mountain View, USA", "aff_domain": "weblab.t.u-tokyo.ac.jp; ; ; ; ; ; ", "email": "weblab.t.u-tokyo.ac.jp; ; ; ; ; ; ", "github": "https://github.com/frt03/pic", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/furuta21a.html", "aff_unique_index": "0;0;1;0;2;2;2", "aff_unique_norm": "University of Tokyo;University of Alberta;Google", "aff_unique_dep": ";;Google Research", "aff_unique_url": "https://www.u-tokyo.ac.jp;https://www.ualberta.ca;https://research.google", "aff_unique_abbr": "UTokyo;UAlberta;Google", "aff_campus_unique_index": "0;0;1;0;2;2;2", "aff_campus_unique": "Tokyo;Edmonton;Mountain View", "aff_country_unique_index": "0;0;1;0;2;2;2", "aff_country_unique": "Japan;Canada;United States" }, { "title": "Poolingformer: Long Document Modeling with Pooling Attention", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9603", "id": "9603", "proceeding": "http://proceedings.mlr.press/v139/zhang21h.html", "slides": "/media/icml-2021/Slides/9603.pdf", "author_site": "Hang ZHANG, Yeyun Gong, Yelong Shen, Weisheng Li, Jiancheng Lv, Nan Duan, Weizhu Chen", "author": "Hang Zhang; Yeyun Gong; Yelong Shen; Weisheng Li; Jiancheng Lv; Nan Duan; Weizhu Chen", "abstract": "In this paper, we introduce a two-level attention schema, Poolingformer, for long document modeling. Its first level uses a smaller sliding window pattern to aggregate information from neighbors. Its second level employs a larger window to increase receptive fields with pooling attention to reduce both computational cost and memory consumption. We first evaluate Poolingformer on two long sequence QA tasks: the monolingual NQ and the multilingual TyDi QA. Experimental results show that Poolingformer sits atop three official leaderboards measured by F1, outperforming previous state-of-the-art models by 1.9 points (79.8 vs. 77.9) on NQ long answer, 1.9 points (79.5 vs. 77.6) on TyDi QA passage answer, and 1.6 points (67.6 vs. 66.0) on TyDi QA minimal answer. We further evaluate Poolingformer on a long sequence summarization task. 
Experimental results on the arXiv benchmark continue to demonstrate its superior performance.", "bibtex": "@InProceedings{pmlr-v139-zhang21h,\n title = \t {Poolingformer: Long Document Modeling with Pooling Attention},\n author = {Zhang, Hang and Gong, Yeyun and Shen, Yelong and Li, Weisheng and Lv, Jiancheng and Duan, Nan and Chen, Weizhu},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12437--12446},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21h/zhang21h.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21h.html},\n abstract = \t {In this paper, we introduce a two-level attention schema, Poolingformer, for long document modeling. Its first level uses a smaller sliding window pattern to aggregate information from neighbors. Its second level employs a larger window to increase receptive fields with pooling attention to reduce both computational cost and memory consumption. We first evaluate Poolingformer on two long sequence QA tasks: the monolingual NQ and the multilingual TyDi QA. Experimental results show that Poolingformer sits atop three official leaderboards measured by F1, outperforming previous state-of-the-art models by 1.9 points (79.8 vs. 77.9) on NQ long answer, 1.9 points (79.5 vs. 77.6) on TyDi QA passage answer, and 1.6 points (67.6 vs. 66.0) on TyDi QA minimal answer. We further evaluate Poolingformer on a long sequence summarization task. Experimental results on the arXiv benchmark continue to demonstrate its superior performance.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21h/zhang21h.pdf", "supp": "", "pdf_size": 656818, "gs_citation": 99, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1846292265569160491&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "College of Computer Science, Sichuan University+During Internship at MSRA; Microsoft Research Asia; Microsoft Azure AI; University of Science and Technology of China; College of Computer Science, Sichuan University; Microsoft Research Asia; Microsoft Azure AI", "aff_domain": "scu.edu.cn;microsoft.com;microsoft.com;ustc.edu.cn;scu.edu.cn;microsoft.com;microsoft.com", "email": "scu.edu.cn;microsoft.com;microsoft.com;ustc.edu.cn;scu.edu.cn;microsoft.com;microsoft.com", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/zhang21h.html", "aff_unique_index": "0+1;1;1;2;0;1;1", "aff_unique_norm": "Sichuan University;Microsoft;University of Science and Technology of China", "aff_unique_dep": "College of Computer Science;Microsoft Research Asia;", "aff_unique_url": "https://www.scu.edu.cn;https://www.msra.cn;http://www.ustc.edu.cn", "aff_unique_abbr": ";MSRA;USTC", "aff_campus_unique_index": ";1;1", "aff_campus_unique": ";Asia", "aff_country_unique_index": "0+0;0;1;0;0;0;1", "aff_country_unique": "China;United States" }, { "title": "PopSkipJump: Decision-Based Attack for Probabilistic Classifiers", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9475", "id": "9475", "proceeding": "http://proceedings.mlr.press/v139/simon-gabriel21a.html", "slides": "", "author_site": "Carl-Johann Simon-Gabriel, Noman Ahmed Sheikh, Andreas Krause", "author": "Carl-Johann Simon-Gabriel; Noman Ahmed Sheikh; Andreas Krause", "abstract": "Most current classifiers are vulnerable to 
adversarial examples, small input perturbations that change the classification output. Many existing attack algorithms cover various settings, from white-box to black-box classifiers, but usually assume that the answers are deterministic and often fail when they are not. We therefore propose a new adversarial decision-based attack specifically designed for classifiers with probabilistic outputs. It is based on the HopSkipJump attack by Chen et al. (2019), a strong and query efficient decision-based attack originally designed for deterministic classifiers. Our P(robabilisticH)opSkipJump attack adapts its amount of queries to maintain HopSkipJump\u2019s original output quality across various noise levels, while converging to its query efficiency as the noise level decreases. We test our attack on various noise models, including state-of-the-art off-the-shelf randomized defenses, and show that they offer almost no extra robustness to decision-based attacks. Code is available at https://github.com/cjsg/PopSkipJump.", "bibtex": "@InProceedings{pmlr-v139-simon-gabriel21a,\n title = \t {PopSkipJump: Decision-Based Attack for Probabilistic Classifiers},\n author = {Simon-Gabriel, Carl-Johann and Sheikh, Noman Ahmed and Krause, Andreas},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9712--9721},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/simon-gabriel21a/simon-gabriel21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/simon-gabriel21a.html},\n abstract = \t {Most current classifiers are vulnerable to adversarial examples, small input perturbations that change the classification output. Many existing attack algorithms cover various settings, from white-box to black-box classifiers, but usually assume that the answers are deterministic and often fail when they are not. We therefore propose a new adversarial decision-based attack specifically designed for classifiers with probabilistic outputs. It is based on the HopSkipJump attack by Chen et al. (2019), a strong and query efficient decision-based attack originally designed for deterministic classifiers. Our P(robabilisticH)opSkipJump attack adapts its amount of queries to maintain HopSkipJump\u2019s original output quality across various noise levels, while converging to its query efficiency as the noise level decreases. We test our attack on various noise models, including state-of-the-art off-the-shelf randomized defenses, and show that they offer almost no extra robustness to decision-based attacks. 
Code is available at https://github.com/cjsg/PopSkipJump.}\n}", "pdf": "http://proceedings.mlr.press/v139/simon-gabriel21a/simon-gabriel21a.pdf", "supp": "", "pdf_size": 736885, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8512283764080476060&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "ETH Zurich; ETH Zurich; ETH Zurich", "aff_domain": "ethz.ch; ; ", "email": "ethz.ch; ; ", "github": "https://github.com/cjsg/PopSkipJump", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/simon-gabriel21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETHZ", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Switzerland" }, { "title": "Positive-Negative Momentum: Manipulating Stochastic Gradient Noise to Improve Generalization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9629", "id": "9629", "proceeding": "http://proceedings.mlr.press/v139/xie21h.html", "slides": "/media/icml-2021/Slides/9629.pdf", "author_site": "Zeke Xie, Li Yuan, Zhanxing Zhu, Masashi Sugiyama", "author": "Zeke Xie; Li Yuan; Zhanxing Zhu; Masashi Sugiyama", "abstract": "It is well-known that stochastic gradient noise (SGN) acts as implicit regularization for deep learning and is essentially important for both optimization and generalization of deep networks. Some works attempted to artificially simulate SGN by injecting random noise to improve deep learning. However, it turned out that the injected simple random noise cannot work as well as SGN, which is anisotropic and parameter-dependent. For simulating SGN at low computational costs and without changing the learning rate or batch size, we propose the Positive-Negative Momentum (PNM) approach that is a powerful alternative to conventional Momentum in classic optimizers. The introduced PNM method maintains two approximate independent momentum terms. Then, we can control the magnitude of SGN explicitly by adjusting the momentum difference. We theoretically prove the convergence guarantee and the generalization advantage of PNM over Stochastic Gradient Descent (SGD). By incorporating PNM into the two conventional optimizers, SGD with Momentum and Adam, our extensive experiments empirically verified the significant advantage of the PNM-based variants over the corresponding conventional Momentum-based optimizers. Code: \\url{https://github.com/zeke-xie/Positive-Negative-Momentum}.", "bibtex": "@InProceedings{pmlr-v139-xie21h,\n title = \t {Positive-Negative Momentum: Manipulating Stochastic Gradient Noise to Improve Generalization},\n author = {Xie, Zeke and Yuan, Li and Zhu, Zhanxing and Sugiyama, Masashi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11448--11458},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xie21h/xie21h.pdf},\n url = \t {https://proceedings.mlr.press/v139/xie21h.html},\n abstract = \t {It is well-known that stochastic gradient noise (SGN) acts as implicit regularization for deep learning and is essentially important for both optimization and generalization of deep networks. 
Some works attempted to artificially simulate SGN by injecting random noise to improve deep learning. However, it turned out that the injected simple random noise cannot work as well as SGN, which is anisotropic and parameter-dependent. For simulating SGN at low computational costs and without changing the learning rate or batch size, we propose the Positive-Negative Momentum (PNM) approach that is a powerful alternative to conventional Momentum in classic optimizers. The introduced PNM method maintains two approximate independent momentum terms. Then, we can control the magnitude of SGN explicitly by adjusting the momentum difference. We theoretically prove the convergence guarantee and the generalization advantage of PNM over Stochastic Gradient Descent (SGD). By incorporating PNM into the two conventional optimizers, SGD with Momentum and Adam, our extensive experiments empirically verified the significant advantage of the PNM-based variants over the corresponding conventional Momentum-based optimizers. Code: \\url{https://github.com/zeke-xie/Positive-Negative-Momentum}.}\n}", "pdf": "http://proceedings.mlr.press/v139/xie21h/xie21h.pdf", "supp": "", "pdf_size": 1263616, "gs_citation": 42, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9647717968624963089&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "The University of Tokyo; National University of Singapore; Beijing Institute of Big Data Research; RIKEN Center for AIP", "aff_domain": "ms.k.u-tokyo.ac.jp;u.nus.edu;pku.edu.cn;k.u-tokyo.ac.jp", "email": "ms.k.u-tokyo.ac.jp;u.nus.edu;pku.edu.cn;k.u-tokyo.ac.jp", "github": "https://github.com/zeke-xie/Positive-Negative-Momentum", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/xie21h.html", "aff_unique_index": "0;1;2;3", "aff_unique_norm": "University of Tokyo;National University of Singapore;Beijing Institute of Big Data Research;RIKEN", "aff_unique_dep": ";;;Center for AIP", "aff_unique_url": "https://www.u-tokyo.ac.jp;https://www.nus.edu.sg;;https://www.riken.jp", "aff_unique_abbr": "UTokyo;NUS;;RIKEN", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;2;0", "aff_country_unique": "Japan;Singapore;China" }, { "title": "Post-selection inference with HSIC-Lasso", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9689", "id": "9689", "proceeding": "http://proceedings.mlr.press/v139/freidling21a.html", "slides": "/media/icml-2021/Slides/9689.pdf", "author_site": "Tobias Freidling, Benjamin Poignard, H\u00e9ctor Climente-Gonz\u00e1lez, Makoto Yamada", "author": "Tobias Freidling; Benjamin Poignard; H\u00e9ctor Climente-Gonz\u00e1lez; Makoto Yamada", "abstract": "Detecting influential features in non-linear and/or high-dimensional data is a challenging and increasingly important task in machine learning. Variable selection methods have thus been gaining much attention as well as post-selection inference. Indeed, the selected features can be significantly flawed when the selection procedure is not accounted for. We propose a selective inference procedure using the so-called model-free \"HSIC-Lasso\" based on the framework of truncated Gaussians combined with the polyhedral lemma. We then develop an algorithm, which allows for low computational costs and provides a selection of the regularisation parameter. 
The performance of our method is illustrated by both artificial and real-world data based experiments, which emphasise a tight control of the type-I error, even for small sample sizes.", "bibtex": "@InProceedings{pmlr-v139-freidling21a,\n title = \t {Post-selection inference with HSIC-Lasso},\n author = {Freidling, Tobias and Poignard, Benjamin and Climente-Gonz{\\'a}lez, H{\\'e}ctor and Yamada, Makoto},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3439--3448},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/freidling21a/freidling21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/freidling21a.html},\n abstract = \t {Detecting influential features in non-linear and/or high-dimensional data is a challenging and increasingly important task in machine learning. Variable selection methods have thus been gaining much attention as well as post-selection inference. Indeed, the selected features can be significantly flawed when the selection procedure is not accounted for. We propose a selective inference procedure using the so-called model-free \"HSIC-Lasso\" based on the framework of truncated Gaussians combined with the polyhedral lemma. We then develop an algorithm, which allows for low computational costs and provides a selection of the regularisation parameter. The performance of our method is illustrated by both artificial and real-world data based experiments, which emphasise a tight control of the type-I error, even for small sample sizes.}\n}", "pdf": "http://proceedings.mlr.press/v139/freidling21a/freidling21a.pdf", "supp": "", "pdf_size": 722401, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10354725144319499088&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Pure Mathematics and Mathematical Statistics, University of Cambridge, Cambridge, United Kingdom; Graduate School of Economics, Osaka University, Osaka, Japan+Center for Advanced Intelligence Project (AIP), RIKEN, Kyoto, Japan; Center for Advanced Intelligence Project (AIP), RIKEN, Kyoto, Japan+Graduate School of Informatics, Kyoto University, Kyoto, Japan; Center for Advanced Intelligence Project (AIP), RIKEN, Kyoto, Japan+Graduate School of Informatics, Kyoto University, Kyoto, Japan", "aff_domain": "cam.ac.uk; ; ; ", "email": "cam.ac.uk; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/freidling21a.html", "aff_unique_index": "0;1+2;2+3;2+3", "aff_unique_norm": "University of Cambridge;Osaka University;RIKEN;Kyoto University", "aff_unique_dep": "Department of Pure Mathematics and Mathematical Statistics;Graduate School of Economics;Center for Advanced Intelligence Project (AIP);Graduate School of Informatics", "aff_unique_url": "https://www.cam.ac.uk;https://www.osaka-u.ac.jp;https://www.riken.jp;https://www.kyoto-u.ac.jp", "aff_unique_abbr": "Cambridge;Osaka U;RIKEN;Kyoto U", "aff_campus_unique_index": "0;1+2;2+2;2+2", "aff_campus_unique": "Cambridge;Osaka;Kyoto", "aff_country_unique_index": "0;1+1;1+1;1+1", "aff_country_unique": "United Kingdom;Japan" }, { "title": "Posterior Value Functions: Hindsight Baselines for Policy Gradient Methods", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9783", "id": "9783", "proceeding": 
"http://proceedings.mlr.press/v139/nota21a.html", "slides": "", "author_site": "Chris Nota, Philip Thomas, Bruno C. da Silva", "author": "Chris Nota; Philip Thomas; Bruno C. Da Silva", "abstract": "Hindsight allows reinforcement learning agents to leverage new observations to make inferences about earlier states and transitions. In this paper, we exploit the idea of hindsight and introduce posterior value functions. Posterior value functions are computed by inferring the posterior distribution over hidden components of the state in previous timesteps and can be used to construct novel unbiased baselines for policy gradient methods. Importantly, we prove that these baselines reduce (and never increase) the variance of policy gradient estimators compared to traditional state value functions. While the posterior value function is motivated by partial observability, we extend these results to arbitrary stochastic MDPs by showing that hindsight-capable agents can model stochasticity in the environment as a special case of partial observability. Finally, we introduce a pair of methods for learning posterior value functions and prove their convergence.", "bibtex": "@InProceedings{pmlr-v139-nota21a,\n title = \t {Posterior Value Functions: Hindsight Baselines for Policy Gradient Methods},\n author = {Nota, Chris and Thomas, Philip and Silva, Bruno C. Da},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8238--8247},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nota21a/nota21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/nota21a.html},\n abstract = \t {Hindsight allows reinforcement learning agents to leverage new observations to make inferences about earlier states and transitions. In this paper, we exploit the idea of hindsight and introduce posterior value functions. Posterior value functions are computed by inferring the posterior distribution over hidden components of the state in previous timesteps and can be used to construct novel unbiased baselines for policy gradient methods. Importantly, we prove that these baselines reduce (and never increase) the variance of policy gradient estimators compared to traditional state value functions. While the posterior value function is motivated by partial observability, we extend these results to arbitrary stochastic MDPs by showing that hindsight-capable agents can model stochasticity in the environment as a special case of partial observability. 
Finally, we introduce a pair of methods for learning posterior value functions and prove their convergence.}\n}", "pdf": "http://proceedings.mlr.press/v139/nota21a/nota21a.pdf", "supp": "", "pdf_size": 821670, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6703284572255713701&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "College of Information and Computer Science, University of Massachusetts, Amherst, MA; College of Information and Computer Science, University of Massachusetts, Amherst, MA; College of Information and Computer Science, University of Massachusetts, Amherst, MA", "aff_domain": "cs.umass.edu; ;cs.umass.edu", "email": "cs.umass.edu; ;cs.umass.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/nota21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Massachusetts Amherst", "aff_unique_dep": "College of Information and Computer Science", "aff_unique_url": "https://www.umass.edu", "aff_unique_abbr": "UMass Amherst", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Amherst", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Practical and Private (Deep) Learning Without Sampling or Shuffling", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9697", "id": "9697", "proceeding": "http://proceedings.mlr.press/v139/kairouz21b.html", "slides": "", "author_site": "Peter Kairouz, Brendan McMahan, Shuang Song, Om Dipakbhai Thakkar, Abhradeep Guha Thakurta, Zheng Xu", "author": "Peter Kairouz; Brendan Mcmahan; Shuang Song; Om Thakkar; Abhradeep Thakurta; Zheng Xu", "abstract": "We consider training models with differential privacy (DP) using mini-batch gradients. The existing state-of-the-art, Differentially Private Stochastic Gradient Descent (DP-SGD), requires \\emph{privacy amplification by sampling or shuffling} to obtain the best privacy/accuracy/computation trade-offs. Unfortunately, the precise requirements on exact sampling and shuffling can be hard to obtain in important practical scenarios, particularly federated learning (FL). We design and analyze a DP variant of Follow-The-Regularized-Leader (DP-FTRL) that compares favorably (both theoretically and empirically) to amplified DP-SGD, while allowing for much more flexible data access patterns. DP-FTRL does not use any form of privacy amplification.", "bibtex": "@InProceedings{pmlr-v139-kairouz21b,\n title = \t {Practical and Private (Deep) Learning Without Sampling or Shuffling},\n author = {Kairouz, Peter and Mcmahan, Brendan and Song, Shuang and Thakkar, Om and Thakurta, Abhradeep and Xu, Zheng},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5213--5225},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kairouz21b/kairouz21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/kairouz21b.html},\n abstract = \t {We consider training models with differential privacy (DP) using mini-batch gradients. The existing state-of-the-art, Differentially Private Stochastic Gradient Descent (DP-SGD), requires \\emph{privacy amplification by sampling or shuffling} to obtain the best privacy/accuracy/computation trade-offs. 
Unfortunately, the precise requirements on exact sampling and shuffling can be hard to obtain in important practical scenarios, particularly federated learning (FL). We design and analyze a DP variant of Follow-The-Regularized-Leader (DP-FTRL) that compares favorably (both theoretically and empirically) to amplified DP-SGD, while allowing for much more flexible data access patterns. DP-FTRL does not use any form of privacy amplification.}\n}", "pdf": "http://proceedings.mlr.press/v139/kairouz21b/kairouz21b.pdf", "supp": "", "pdf_size": 5065783, "gs_citation": 226, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18355684827148389503&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Google; Google; Google; Google; Google; Google", "aff_domain": "google.com; ; ; ; ; ", "email": "google.com; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/kairouz21b.html", "aff_unique_index": "0;0;0;0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google", "aff_unique_url": "https://www.google.com", "aff_unique_abbr": "Google", "aff_campus_unique_index": "0;0;0;0;0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Predict then Interpolate: A Simple Algorithm to Learn Stable Classifiers", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9759", "id": "9759", "proceeding": "http://proceedings.mlr.press/v139/bao21a.html", "slides": "", "author_site": "Yujia Bao, Shiyu Chang, Regina Barzilay", "author": "Yujia Bao; Shiyu Chang; Regina Barzilay", "abstract": "We propose Predict then Interpolate (PI), a simple algorithm for learning correlations that are stable across environments. The algorithm follows from the intuition that when using a classifier trained on one environment to make predictions on examples from another environment, its mistakes are informative as to which correlations are unstable. In this work, we prove that by interpolating the distributions of the correct predictions and the wrong predictions, we can uncover an oracle distribution where the unstable correlation vanishes. Since the oracle interpolation coefficients are not accessible, we use group distributionally robust optimization to minimize the worst-case risk across all such interpolations. We evaluate our method on both text classification and image classification. Empirical results demonstrate that our algorithm is able to learn robust classifiers (outperforms IRM by 23.85% on synthetic environments and 12.41% on natural environments). Our code and data are available at https://github.com/YujiaBao/ Predict-then-Interpolate.", "bibtex": "@InProceedings{pmlr-v139-bao21a,\n title = \t {Predict then Interpolate: A Simple Algorithm to Learn Stable Classifiers},\n author = {Bao, Yujia and Chang, Shiyu and Barzilay, Regina},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {640--650},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bao21a/bao21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bao21a.html},\n abstract = \t {We propose Predict then Interpolate (PI), a simple algorithm for learning correlations that are stable across environments. 
The algorithm follows from the intuition that when using a classifier trained on one environment to make predictions on examples from another environment, its mistakes are informative as to which correlations are unstable. In this work, we prove that by interpolating the distributions of the correct predictions and the wrong predictions, we can uncover an oracle distribution where the unstable correlation vanishes. Since the oracle interpolation coefficients are not accessible, we use group distributionally robust optimization to minimize the worst-case risk across all such interpolations. We evaluate our method on both text classification and image classification. Empirical results demonstrate that our algorithm is able to learn robust classifiers (outperforms IRM by 23.85% on synthetic environments and 12.41% on natural environments). Our code and data are available at https://github.com/YujiaBao/ Predict-then-Interpolate.}\n}", "pdf": "http://proceedings.mlr.press/v139/bao21a/bao21a.pdf", "supp": "", "pdf_size": 1841505, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2357278583556296891&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "MIT CSAIL; MIT-IBM Watson AI Lab; MIT CSAIL", "aff_domain": "csail.mit.edu; ; ", "email": "csail.mit.edu; ; ", "github": "https://github.com/YujiaBao/Predict-then-Interpolate", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/bao21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory", "aff_unique_url": "https://www.csail.mit.edu", "aff_unique_abbr": "MIT CSAIL", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cambridge;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Prediction-Centric Learning of Independent Cascade Dynamics from Partial Observations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10525", "id": "10525", "proceeding": "http://proceedings.mlr.press/v139/wilinski21a.html", "slides": "/media/icml-2021/Slides/10525.pdf", "author_site": "Mateusz Wilinski, Andrey Lokhov", "author": "Mateusz Wilinski; Andrey Lokhov", "abstract": "Spreading processes play an increasingly important role in modeling for diffusion networks, information propagation, marketing and opinion setting. We address the problem of learning of a spreading model such that the predictions generated from this model are accurate and could be subsequently used for the optimization, and control of diffusion dynamics. We focus on a challenging setting where full observations of the dynamics are not available, and standard approaches such as maximum likelihood quickly become intractable for large network instances. We introduce a computationally efficient algorithm, based on a scalable dynamic message-passing approach, which is able to learn parameters of the effective spreading model given only limited information on the activation times of nodes in the network. The popular Independent Cascade model is used to illustrate our approach. We show that tractable inference from the learned model generates a better prediction of marginal probabilities compared to the original model. 
We develop a systematic procedure for learning a mixture of models which further improves the prediction quality.", "bibtex": "@InProceedings{pmlr-v139-wilinski21a,\n title = \t {Prediction-Centric Learning of Independent Cascade Dynamics from Partial Observations},\n author = {Wilinski, Mateusz and Lokhov, Andrey},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11182--11192},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wilinski21a/wilinski21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/wilinski21a.html},\n abstract = \t {Spreading processes play an increasingly important role in modeling for diffusion networks, information propagation, marketing and opinion setting. We address the problem of learning of a spreading model such that the predictions generated from this model are accurate and could be subsequently used for the optimization, and control of diffusion dynamics. We focus on a challenging setting where full observations of the dynamics are not available, and standard approaches such as maximum likelihood quickly become intractable for large network instances. We introduce a computationally efficient algorithm, based on a scalable dynamic message-passing approach, which is able to learn parameters of the effective spreading model given only limited information on the activation times of nodes in the network. The popular Independent Cascade model is used to illustrate our approach. We show that tractable inference from the learned model generates a better prediction of marginal probabilities compared to the original model. We develop a systematic procedure for learning a mixture of models which further improves the prediction quality.}\n}", "pdf": "http://proceedings.mlr.press/v139/wilinski21a/wilinski21a.pdf", "supp": "", "pdf_size": 3429693, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10502404999928524540&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Theoretical Division, Los Alamos National Laboratory, Los Alamos, USA; Theoretical Division, Los Alamos National Laboratory, Los Alamos, USA", "aff_domain": "lanl.gov;lanl.gov", "email": "lanl.gov;lanl.gov", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/wilinski21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Los Alamos National Laboratory", "aff_unique_dep": "Theoretical Division", "aff_unique_url": "https://www.lanl.gov", "aff_unique_abbr": "LANL", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Los Alamos", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Preferential Temporal Difference Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10313", "id": "10313", "proceeding": "http://proceedings.mlr.press/v139/anand21a.html", "slides": "", "author_site": "Nishanth Anand, Doina Precup", "author": "Nishanth Anand; Doina Precup", "abstract": "Temporal-Difference (TD) learning is a general and very useful tool for estimating the value function of a given policy, which in turn is required to find good policies. Generally speaking, TD learning updates states whenever they are visited. 
When the agent lands in a state, its value can be used to compute the TD-error, which is then propagated to other states. However, it may be interesting, when computing updates, to take into account other information than whether a state is visited or not. For example, some states might be more important than others (such as states which are frequently seen in a successful trajectory). Or, some states might have unreliable value estimates (for example, due to partial observability or lack of data), making their values less desirable as targets. We propose an approach to re-weighting states used in TD updates, both when they are the input and when they provide the target for the update. We prove that our approach converges with linear function approximation and illustrate its desirable empirical behaviour compared to other TD-style methods.", "bibtex": "@InProceedings{pmlr-v139-anand21a,\n title = \t {Preferential Temporal Difference Learning},\n author = {Anand, Nishanth and Precup, Doina},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {286--296},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/anand21a/anand21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/anand21a.html},\n abstract = \t {Temporal-Difference (TD) learning is a general and very useful tool for estimating the value function of a given policy, which in turn is required to find good policies. Generally speaking, TD learning updates states whenever they are visited. When the agent lands in a state, its value can be used to compute the TD-error, which is then propagated to other states. However, it may be interesting, when computing updates, to take into account other information than whether a state is visited or not. For example, some states might be more important than others (such as states which are frequently seen in a successful trajectory). Or, some states might have unreliable value estimates (for example, due to partial observability or lack of data), making their values less desirable as targets. We propose an approach to re-weighting states used in TD updates, both when they are the input and when they provide the target for the update. 
We prove that our approach converges with linear function approximation and illustrate its desirable empirical behaviour compared to other TD-style methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/anand21a/anand21a.pdf", "supp": "", "pdf_size": 7269363, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17314820173846745739&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Mila (Quebec Artificial Intelligence Institute), Montreal, Canada+School of Computer Science, McGill University, Montreal, Canada; Mila (Quebec Artificial Intelligence Institute), Montreal, Canada+School of Computer Science, McGill University, Montreal, Canada+Deepmind, Montreal, Canada", "aff_domain": "mail.mcgill.ca; ", "email": "mail.mcgill.ca; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/anand21a.html", "aff_unique_index": "0+1;0+1+2", "aff_unique_norm": "Quebec Artificial Intelligence Institute;McGill University;DeepMind", "aff_unique_dep": ";School of Computer Science;", "aff_unique_url": "https://mila.quebec;https://www.mcgill.ca;https://deepmind.com", "aff_unique_abbr": "Mila;McGill;DeepMind", "aff_campus_unique_index": "0+0;0+0+0", "aff_campus_unique": "Montreal", "aff_country_unique_index": "0+0;0+0+0", "aff_country_unique": "Canada" }, { "title": "Principal Bit Analysis: Autoencoding with Schur-Concave Loss", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9977", "id": "9977", "proceeding": "http://proceedings.mlr.press/v139/bhadane21a.html", "slides": "", "author_site": "Sourbh Bhadane, Aaron Wagner, Jayadev Acharya", "author": "Sourbh Bhadane; Aaron B Wagner; Jayadev Acharya", "abstract": "We consider a linear autoencoder in which the latent variables are quantized, or corrupted by noise, and the constraint is Schur-concave in the set of latent variances. Although finding the optimal encoder/decoder pair for this setup is a nonconvex optimization problem, we show that decomposing the source into its principal components is optimal. If the constraint is strictly Schur-concave and the empirical covariance matrix has only simple eigenvalues, then any optimal encoder/decoder must decompose the source in this way. As one application, we consider a strictly Schur-concave constraint that estimates the number of bits needed to represent the latent variables under fixed-rate encoding, a setup that we call \\emph{Principal Bit Analysis (PBA)}. This yields a practical, general-purpose, fixed-rate compressor that outperforms existing algorithms. 
As a second application, we show that a prototypical autoencoder-based variable-rate compressor is guaranteed to decompose the source into its principal components.", "bibtex": "@InProceedings{pmlr-v139-bhadane21a,\n title = \t {Principal Bit Analysis: Autoencoding with Schur-Concave Loss},\n author = {Bhadane, Sourbh and Wagner, Aaron B and Acharya, Jayadev},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {852--862},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bhadane21a/bhadane21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bhadane21a.html},\n abstract = \t {We consider a linear autoencoder in which the latent variables are quantized, or corrupted by noise, and the constraint is Schur-concave in the set of latent variances. Although finding the optimal encoder/decoder pair for this setup is a nonconvex optimization problem, we show that decomposing the source into its principal components is optimal. If the constraint is strictly Schur-concave and the empirical covariance matrix has only simple eigenvalues, then any optimal encoder/decoder must decompose the source in this way. As one application, we consider a strictly Schur-concave constraint that estimates the number of bits needed to represent the latent variables under fixed-rate encoding, a setup that we call \\emph{Principal Bit Analysis (PBA)}. This yields a practical, general-purpose, fixed-rate compressor that outperforms existing algorithms. As a second application, we show that a prototypical autoencoder-based variable-rate compressor is guaranteed to decompose the source into its principal components.}\n}", "pdf": "http://proceedings.mlr.press/v139/bhadane21a/bhadane21a.pdf", "supp": "", "pdf_size": 610354, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11365886742546689505&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Cornell University; Cornell University; Cornell University", "aff_domain": "cornell.edu; ; ", "email": "cornell.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/bhadane21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Cornell University", "aff_unique_dep": "", "aff_unique_url": "https://www.cornell.edu", "aff_unique_abbr": "Cornell", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Principal Component Hierarchy for Sparse Quadratic Programs", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9113", "id": "9113", "proceeding": "http://proceedings.mlr.press/v139/vreugdenhil21a.html", "slides": "", "author_site": "Robbie Vreugdenhil, Viet Anh Nguyen, Armin Eftekhari, Peyman Mohajerin Esfahani", "author": "Robbie Vreugdenhil; Viet Anh Nguyen; Armin Eftekhari; Peyman Mohajerin Esfahani", "abstract": "We propose a novel approximation hierarchy for cardinality-constrained, convex quadratic programs that exploits the rank-dominating eigenvectors of the quadratic matrix. Each level of approximation admits a min-max characterization whose objective function can be optimized over the binary variables analytically, while preserving convexity in the continuous variables. 
Exploiting this property, we propose two scalable optimization algorithms, coined as the \u201cbest response\" and the \u201cdual program\", that can efficiently screen the potential indices of the nonzero elements of the original program. We show that the proposed methods are competitive with the existing screening methods in the current sparse regression literature, and it is particularly fast on instances with high number of measurements in experiments with both synthetic and real datasets.", "bibtex": "@InProceedings{pmlr-v139-vreugdenhil21a,\n title = \t {Principal Component Hierarchy for Sparse Quadratic Programs},\n author = {Vreugdenhil, Robbie and Nguyen, Viet Anh and Eftekhari, Armin and Esfahani, Peyman Mohajerin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10607--10616},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/vreugdenhil21a/vreugdenhil21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/vreugdenhil21a.html},\n abstract = \t {We propose a novel approximation hierarchy for cardinality-constrained, convex quadratic programs that exploits the rank-dominating eigenvectors of the quadratic matrix. Each level of approximation admits a min-max characterization whose objective function can be optimized over the binary variables analytically, while preserving convexity in the continuous variables. Exploiting this property, we propose two scalable optimization algorithms, coined as the \u201cbest response\" and the \u201cdual program\", that can efficiently screen the potential indices of the nonzero elements of the original program. 
We show that the proposed methods are competitive with the existing screening methods in the current sparse regression literature, and it is particularly fast on instances with high number of measurements in experiments with both synthetic and real datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/vreugdenhil21a/vreugdenhil21a.pdf", "supp": "", "pdf_size": 801211, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2335943370788592099&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Delft Center for Systems and Control, Delft University of Technology; Department of Management Science and Engineering, Stanford University + VinAI Research, Vietnam; Department of Mathematics and Mathematical Statistics, Umea University; Delft Center for Systems and Control, Delft University of Technology", "aff_domain": "gmail.com; ; ; ", "email": "gmail.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/vreugdenhil21a.html", "aff_unique_index": "0;1+2;3;0", "aff_unique_norm": "Delft University of Technology;Stanford University;VinAI Research;Ume\u00e5 University", "aff_unique_dep": "Delft Center for Systems and Control;Department of Management Science and Engineering;;Department of Mathematics and Mathematical Statistics", "aff_unique_url": "https://www.tudelft.nl;https://www.stanford.edu;https://www.vin.ai;https://www.umu.se", "aff_unique_abbr": "TU Delft;Stanford;VinAI;UMU", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Delft;Stanford;", "aff_country_unique_index": "0;1+2;3;0", "aff_country_unique": "Netherlands;United States;Vietnam;Sweden" }, { "title": "Principled Exploration via Optimistic Bootstrapping and Backward Induction", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8831", "id": "8831", "proceeding": "http://proceedings.mlr.press/v139/bai21d.html", "slides": "", "author_site": "Chenjia Bai, Lingxiao Wang, Lei Han, Jianye Hao, Animesh Garg, Peng Liu, Zhaoran Wang", "author": "Chenjia Bai; Lingxiao Wang; Lei Han; Jianye Hao; Animesh Garg; Peng Liu; Zhaoran Wang", "abstract": "One principled approach for provably efficient exploration is incorporating the upper confidence bound (UCB) into the value function as a bonus. However, UCB is specified to deal with linear and tabular settings and is incompatible with Deep Reinforcement Learning (DRL). In this paper, we propose a principled exploration method for DRL through Optimistic Bootstrapping and Backward Induction (OB2I). OB2I constructs a general-purpose UCB-bonus through non-parametric bootstrap in DRL. The UCB-bonus estimates the epistemic uncertainty of state-action pairs for optimistic exploration. We build theoretical connections between the proposed UCB-bonus and the LSVI-UCB in linear setting. We propagate future uncertainty in a time-consistent manner through episodic backward update, which exploits the theoretical advantage and empirically improves the sample-efficiency. 
Our experiments in MNIST maze and Atari suit suggest that OB2I outperforms several state-of-the-art exploration approaches.", "bibtex": "@InProceedings{pmlr-v139-bai21d,\n title = \t {Principled Exploration via Optimistic Bootstrapping and Backward Induction},\n author = {Bai, Chenjia and Wang, Lingxiao and Han, Lei and Hao, Jianye and Garg, Animesh and Liu, Peng and Wang, Zhaoran},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {577--587},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bai21d/bai21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/bai21d.html},\n abstract = \t {One principled approach for provably efficient exploration is incorporating the upper confidence bound (UCB) into the value function as a bonus. However, UCB is specified to deal with linear and tabular settings and is incompatible with Deep Reinforcement Learning (DRL). In this paper, we propose a principled exploration method for DRL through Optimistic Bootstrapping and Backward Induction (OB2I). OB2I constructs a general-purpose UCB-bonus through non-parametric bootstrap in DRL. The UCB-bonus estimates the epistemic uncertainty of state-action pairs for optimistic exploration. We build theoretical connections between the proposed UCB-bonus and the LSVI-UCB in linear setting. We propagate future uncertainty in a time-consistent manner through episodic backward update, which exploits the theoretical advantage and empirically improves the sample-efficiency. Our experiments in MNIST maze and Atari suit suggest that OB2I outperforms several state-of-the-art exploration approaches.}\n}", "pdf": "http://proceedings.mlr.press/v139/bai21d/bai21d.pdf", "supp": "", "pdf_size": 2850008, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=732043823350828929&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Harbin Institute of Technology; Northwestern University; Tencent Robotics X; Tianjin University; University of Toronto, Vector Institute; Harbin Institute of Technology; Northwestern University", "aff_domain": "stu.hit.edu.cn; ; ; ; ; ; ", "email": "stu.hit.edu.cn; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/bai21d.html", "aff_unique_index": "0;1;2;3;4;0;1", "aff_unique_norm": "Harbin Institute of Technology;Northwestern University;Tencent;Tianjin University;University of Toronto", "aff_unique_dep": ";;Tencent Robotics X;;", "aff_unique_url": "http://www.hit.edu.cn/;https://www.northwestern.edu;https://www.tencent.com;http://www.tju.edu.cn;https://www.utoronto.ca", "aff_unique_abbr": "HIT;NU;Tencent Robotics X;TJU;U of T", "aff_campus_unique_index": "0;2;0", "aff_campus_unique": "Harbin;;Toronto", "aff_country_unique_index": "0;1;0;0;2;0;1", "aff_country_unique": "China;United States;Canada" }, { "title": "Principled Simplicial Neural Networks for Trajectory Prediction", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10021", "id": "10021", "proceeding": "http://proceedings.mlr.press/v139/roddenberry21a.html", "slides": "", "author_site": "T. Mitchell Roddenberry, Nicholas Glaze, Santiago Segarra", "author": "T. 
Mitchell Roddenberry; Nicholas Glaze; Santiago Segarra", "abstract": "We consider the construction of neural network architectures for data on simplicial complexes. In studying maps on the chain complex of a simplicial complex, we define three desirable properties of a simplicial neural network architecture: namely, permutation equivariance, orientation equivariance, and simplicial awareness. The first two properties respectively account for the fact that the node indexing and the simplex orientations in a simplicial complex are arbitrary. The last property encodes the desirable feature that the output of the neural network depends on the entire simplicial complex and not on a subset of its dimensions. Based on these properties, we propose a simple convolutional architecture, rooted in tools from algebraic topology, for the problem of trajectory prediction, and show that it obeys all three of these properties when an odd, nonlinear activation function is used. We then demonstrate the effectiveness of this architecture in extrapolating trajectories on synthetic and real datasets, with particular emphasis on the gains in generalizability to unseen trajectories.", "bibtex": "@InProceedings{pmlr-v139-roddenberry21a,\n title = \t {Principled Simplicial Neural Networks for Trajectory Prediction},\n author = {Roddenberry, T. Mitchell and Glaze, Nicholas and Segarra, Santiago},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9020--9029},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/roddenberry21a/roddenberry21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/roddenberry21a.html},\n abstract = \t {We consider the construction of neural network architectures for data on simplicial complexes. In studying maps on the chain complex of a simplicial complex, we define three desirable properties of a simplicial neural network architecture: namely, permutation equivariance, orientation equivariance, and simplicial awareness. The first two properties respectively account for the fact that the node indexing and the simplex orientations in a simplicial complex are arbitrary. The last property encodes the desirable feature that the output of the neural network depends on the entire simplicial complex and not on a subset of its dimensions. Based on these properties, we propose a simple convolutional architecture, rooted in tools from algebraic topology, for the problem of trajectory prediction, and show that it obeys all three of these properties when an odd, nonlinear activation function is used. 
We then demonstrate the effectiveness of this architecture in extrapolating trajectories on synthetic and real datasets, with particular emphasis on the gains in generalizability to unseen trajectories.}\n}", "pdf": "http://proceedings.mlr.press/v139/roddenberry21a/roddenberry21a.pdf", "supp": "", "pdf_size": 461012, "gs_citation": 117, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4466528152103096087&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Electrical and Computer Engineering, Rice University; Department of Electrical and Computer Engineering, Rice University; Department of Electrical and Computer Engineering, Rice University", "aff_domain": "rice.edu;rice.edu;rice.edu", "email": "rice.edu;rice.edu;rice.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/roddenberry21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Rice University", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.rice.edu", "aff_unique_abbr": "Rice", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Prior Image-Constrained Reconstruction using Style-Based Generative Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9541", "id": "9541", "proceeding": "http://proceedings.mlr.press/v139/kelkar21a.html", "slides": "/media/icml-2021/Slides/9541.pdf", "author_site": "Varun A. Kelkar, Mark Anastasio", "author": "Varun A Kelkar; Mark Anastasio", "abstract": "Obtaining a useful estimate of an object from highly incomplete imaging measurements remains a holy grail of imaging science. Deep learning methods have shown promise in learning object priors or constraints to improve the conditioning of an ill-posed imaging inverse problem. In this study, a framework for estimating an object of interest that is semantically related to a known prior image, is proposed. An optimization problem is formulated in the disentangled latent space of a style-based generative model, and semantically meaningful constraints are imposed using the disentangled latent representation of the prior image. Stable recovery from incomplete measurements with the help of a prior image is theoretically analyzed. Numerical experiments demonstrating the superior performance of our approach as compared to related methods are presented.", "bibtex": "@InProceedings{pmlr-v139-kelkar21a,\n title = \t {Prior Image-Constrained Reconstruction using Style-Based Generative Models},\n author = {Kelkar, Varun A and Anastasio, Mark},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5367--5377},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kelkar21a/kelkar21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kelkar21a.html},\n abstract = \t {Obtaining a useful estimate of an object from highly incomplete imaging measurements remains a holy grail of imaging science. Deep learning methods have shown promise in learning object priors or constraints to improve the conditioning of an ill-posed imaging inverse problem. In this study, a framework for estimating an object of interest that is semantically related to a known prior image, is proposed. 
An optimization problem is formulated in the disentangled latent space of a style-based generative model, and semantically meaningful constraints are imposed using the disentangled latent representation of the prior image. Stable recovery from incomplete measurements with the help of a prior image is theoretically analyzed. Numerical experiments demonstrating the superior performance of our approach as compared to related methods are presented.}\n}", "pdf": "http://proceedings.mlr.press/v139/kelkar21a/kelkar21a.pdf", "supp": "", "pdf_size": 4262551, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11782166038775253980&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign", "aff_domain": "illinois.edu;illinois.edu", "email": "illinois.edu;illinois.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/kelkar21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Illinois Urbana-Champaign", "aff_unique_dep": "", "aff_unique_url": "https://illinois.edu", "aff_unique_abbr": "UIUC", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Urbana-Champaign", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Prioritized Level Replay", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9283", "id": "9283", "proceeding": "http://proceedings.mlr.press/v139/jiang21b.html", "slides": "/media/icml-2021/Slides/9283.pdf", "author_site": "Minqi Jiang, Edward Grefenstette, Tim Rockt\u00e4schel", "author": "Minqi Jiang; Edward Grefenstette; Tim Rockt\u00e4schel", "abstract": "Environments with procedurally generated content serve as important benchmarks for testing systematic generalization in deep reinforcement learning. In this setting, each level is an algorithmically created environment instance with a unique configuration of its factors of variation. Training on a prespecified subset of levels allows for testing generalization to unseen levels. What can be learned from a level depends on the current policy, yet prior work defaults to uniform sampling of training levels independently of the policy. We introduce Prioritized Level Replay (PLR), a general framework for selectively sampling the next training level by prioritizing those with higher estimated learning potential when revisited in the future. We show TD-errors effectively estimate a level\u2019s future learning potential and, when used to guide the sampling procedure, induce an emergent curriculum of increasingly difficult levels. By adapting the sampling of training levels, PLR significantly improves sample-efficiency and generalization on Procgen Benchmark\u2014matching the previous state-of-the-art in test return\u2014and readily combines with other methods. 
Combined with the previous leading method, PLR raises the state-of-the-art to over 76% improvement in test return relative to standard RL baselines.", "bibtex": "@InProceedings{pmlr-v139-jiang21b,\n title = \t {Prioritized Level Replay},\n author = {Jiang, Minqi and Grefenstette, Edward and Rockt{\\\"a}schel, Tim},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4940--4950},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jiang21b/jiang21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/jiang21b.html},\n abstract = \t {Environments with procedurally generated content serve as important benchmarks for testing systematic generalization in deep reinforcement learning. In this setting, each level is an algorithmically created environment instance with a unique configuration of its factors of variation. Training on a prespecified subset of levels allows for testing generalization to unseen levels. What can be learned from a level depends on the current policy, yet prior work defaults to uniform sampling of training levels independently of the policy. We introduce Prioritized Level Replay (PLR), a general framework for selectively sampling the next training level by prioritizing those with higher estimated learning potential when revisited in the future. We show TD-errors effectively estimate a level\u2019s future learning potential and, when used to guide the sampling procedure, induce an emergent curriculum of increasingly difficult levels. By adapting the sampling of training levels, PLR significantly improves sample-efficiency and generalization on Procgen Benchmark\u2014matching the previous state-of-the-art in test return\u2014and readily combines with other methods. 
Combined with the previous leading method, PLR raises the state-of-the-art to over 76% improvement in test return relative to standard RL baselines.}\n}", "pdf": "http://proceedings.mlr.press/v139/jiang21b/jiang21b.pdf", "supp": "", "pdf_size": 2161126, "gs_citation": 194, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18011658212512846682&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 7, "aff": "Facebook AI Research, London, United Kingdom + University College London, London, United Kingdom; Facebook AI Research, London, United Kingdom + University College London, London, United Kingdom; Facebook AI Research, London, United Kingdom + University College London, London, United Kingdom", "aff_domain": "fb.com; ; ", "email": "fb.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/jiang21b.html", "aff_unique_index": "0+1;0+1;0+1", "aff_unique_norm": "Meta;University College London", "aff_unique_dep": "Facebook AI Research;", "aff_unique_url": "https://research.facebook.com;https://www.ucl.ac.uk", "aff_unique_abbr": "FAIR;UCL", "aff_campus_unique_index": "0+0;0+0;0+0", "aff_campus_unique": "London", "aff_country_unique_index": "0+0;0+0;0+0", "aff_country_unique": "United Kingdom" }, { "title": "Privacy-Preserving Feature Selection with Secure Multiparty Computation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9777", "id": "9777", "proceeding": "http://proceedings.mlr.press/v139/li21e.html", "slides": "", "author_site": "Xiling Li, Rafael Dowsley, Martine De Cock", "author": "Xiling Li; Rafael Dowsley; Martine De Cock", "abstract": "Existing work on privacy-preserving machine learning with Secure Multiparty Computation (MPC) is almost exclusively focused on model training and on inference with trained models, thereby overlooking the important data pre-processing stage. In this work, we propose the first MPC based protocol for private feature selection based on the filter method, which is independent of model training, and can be used in combination with any MPC protocol to rank features. We propose an efficient feature scoring protocol based on Gini impurity to this end. To demonstrate the feasibility of our approach for practical data science, we perform experiments with the proposed MPC protocols for feature selection in a commonly used machine-learning-as-a-service configuration where computations are outsourced to multiple servers, with semi-honest and with malicious adversaries. Regarding effectiveness, we show that secure feature selection with the proposed protocols improves the accuracy of classifiers on a variety of real-world data sets, without leaking information about the feature values or even which features were selected. 
Regarding efficiency, we document runtimes ranging from several seconds to an hour for our protocols to finish, depending on the size of the data set and the security settings.", "bibtex": "@InProceedings{pmlr-v139-li21e,\n title = \t {Privacy-Preserving Feature Selection with Secure Multiparty Computation},\n author = {Li, Xiling and Dowsley, Rafael and De Cock, Martine},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6326--6336},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21e/li21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21e.html},\n abstract = \t {Existing work on privacy-preserving machine learning with Secure Multiparty Computation (MPC) is almost exclusively focused on model training and on inference with trained models, thereby overlooking the important data pre-processing stage. In this work, we propose the first MPC based protocol for private feature selection based on the filter method, which is independent of model training, and can be used in combination with any MPC protocol to rank features. We propose an efficient feature scoring protocol based on Gini impurity to this end. To demonstrate the feasibility of our approach for practical data science, we perform experiments with the proposed MPC protocols for feature selection in a commonly used machine-learning-as-a-service configuration where computations are outsourced to multiple servers, with semi-honest and with malicious adversaries. Regarding effectiveness, we show that secure feature selection with the proposed protocols improves the accuracy of classifiers on a variety of real-world data sets, without leaking information about the feature values or even which features were selected. Regarding efficiency, we document runtimes ranging from several seconds to an hour for our protocols to finish, depending on the size of the data set and the security settings.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21e/li21e.pdf", "supp": "", "pdf_size": 473987, "gs_citation": 61, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4353777882364497374&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "School of Engineering and Technology, University of Washington, Tacoma, Washington, USA+Department of Appl. Math., Computer Science and Statistics, Ghent University, Ghent, Belgium; Faculty of Information Technology, Monash University, Clayton, Australia; Department of Appl. Math., Computer Science and Statistics, Ghent University, Ghent, Belgium", "aff_domain": "uw.edu; ; ", "email": "uw.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/li21e.html", "aff_unique_index": "0+1;2;1", "aff_unique_norm": "University of Washington;Ghent University;Monash University", "aff_unique_dep": "School of Engineering and Technology;Department of Appl. 
Math., Computer Science and Statistics;Faculty of Information Technology", "aff_unique_url": "https://www.washington.edu;https://www.ugent.be;https://www.monash.edu", "aff_unique_abbr": "UW;UGent;Monash", "aff_campus_unique_index": "0+1;2;1", "aff_campus_unique": "Tacoma;Ghent;Clayton", "aff_country_unique_index": "0+1;2;1", "aff_country_unique": "United States;Belgium;Australia" }, { "title": "Privacy-Preserving Video Classification with Convolutional Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10097", "id": "10097", "proceeding": "http://proceedings.mlr.press/v139/pentyala21a.html", "slides": "/media/icml-2021/Slides/10097.pdf", "author_site": "Sikha Pentyala, Rafael Dowsley, Martine De Cock", "author": "Sikha Pentyala; Rafael Dowsley; Martine De Cock", "abstract": "Many video classification applications require access to personal data, thereby posing an invasive security risk to the users\u2019 privacy. We propose a privacy-preserving implementation of single-frame method based video classification with convolutional neural networks that allows a party to infer a label from a video without necessitating the video owner to disclose their video to other entities in an unencrypted manner. Similarly, our approach removes the requirement of the classifier owner from revealing their model parameters to outside entities in plaintext. To this end, we combine existing Secure Multi-Party Computation (MPC) protocols for private image classification with our novel MPC protocols for oblivious single-frame selection and secure label aggregation across frames. The result is an end-to-end privacy-preserving video classification pipeline. We evaluate our proposed solution in an application for private human emotion recognition. Our results across a variety of security settings, spanning honest and dishonest majority configurations of the computing parties, and for both passive and active adversaries, demonstrate that videos can be classified with state-of-the-art accuracy, and without leaking sensitive user information.", "bibtex": "@InProceedings{pmlr-v139-pentyala21a,\n title = \t {Privacy-Preserving Video Classification with Convolutional Neural Networks},\n author = {Pentyala, Sikha and Dowsley, Rafael and De Cock, Martine},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8487--8499},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/pentyala21a/pentyala21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/pentyala21a.html},\n abstract = \t {Many video classification applications require access to personal data, thereby posing an invasive security risk to the users\u2019 privacy. We propose a privacy-preserving implementation of single-frame method based video classification with convolutional neural networks that allows a party to infer a label from a video without necessitating the video owner to disclose their video to other entities in an unencrypted manner. Similarly, our approach removes the requirement of the classifier owner from revealing their model parameters to outside entities in plaintext. 
To this end, we combine existing Secure Multi-Party Computation (MPC) protocols for private image classification with our novel MPC protocols for oblivious single-frame selection and secure label aggregation across frames. The result is an end-to-end privacy-preserving video classification pipeline. We evaluate our proposed solution in an application for private human emotion recognition. Our results across a variety of security settings, spanning honest and dishonest majority configurations of the computing parties, and for both passive and active adversaries, demonstrate that videos can be classified with state-of-the-art accuracy, and without leaking sensitive user information.}\n}", "pdf": "http://proceedings.mlr.press/v139/pentyala21a/pentyala21a.pdf", "supp": "", "pdf_size": 1234092, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7039242115498037265&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "School of Engineering and Technology, University of Washington, Tacoma, WA, USA+Dept. of Appl. Math., Computer Science and Statistics, Ghent University, Ghent, Belgium; Faculty of Information Technology, Monash University, Clayton, Australia; Dept. of Appl. Math., Computer Science and Statistics, Ghent University, Ghent, Belgium", "aff_domain": "uw.edu;monash.edu;uw.edu", "email": "uw.edu;monash.edu;uw.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/pentyala21a.html", "aff_unique_index": "0+1;2;1", "aff_unique_norm": "University of Washington;Ghent University;Monash University", "aff_unique_dep": "School of Engineering and Technology;Dept. of Appl. Math., Computer Science and Statistics;Faculty of Information Technology", "aff_unique_url": "https://www.washington.edu;https://www.ugent.be;https://www.monash.edu", "aff_unique_abbr": "UW;UGent;Monash", "aff_campus_unique_index": "0+1;2;1", "aff_campus_unique": "Tacoma;Ghent;Clayton", "aff_country_unique_index": "0+1;2;1", "aff_country_unique": "United States;Belgium;Australia" }, { "title": "Private Adaptive Gradient Methods for Convex Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9209", "id": "9209", "proceeding": "http://proceedings.mlr.press/v139/asi21a.html", "slides": "/media/icml-2021/Slides/9209.pdf", "author_site": "Hilal Asi, John Duchi, Alireza Fallah, Omid Javidbakht, Kunal Talwar", "author": "Hilal Asi; John Duchi; Alireza Fallah; Omid Javidbakht; Kunal Talwar", "abstract": "We study adaptive methods for differentially private convex optimization, proposing and analyzing differentially private variants of a Stochastic Gradient Descent (SGD) algorithm with adaptive stepsizes, as well as the AdaGrad algorithm. We provide upper bounds on the regret of both algorithms and show that the bounds are (worst-case) optimal. As a consequence of our development, we show that our private versions of AdaGrad outperform adaptive SGD, which in turn outperforms traditional SGD in scenarios with non-isotropic gradients where (non-private) Adagrad provably outperforms SGD. The major challenge is that the isotropic noise typically added for privacy dominates the signal in gradient geometry for high-dimensional problems; approaches to this that effectively optimize over lower-dimensional subspaces simply ignore the actual problems that varying gradient geometries introduce. 
In contrast, we study non-isotropic clipping and noise addition, developing a principled theoretical approach; the consequent procedures also enjoy significantly stronger empirical performance than prior approaches.", "bibtex": "@InProceedings{pmlr-v139-asi21a,\n title = \t {Private Adaptive Gradient Methods for Convex Optimization},\n author = {Asi, Hilal and Duchi, John and Fallah, Alireza and Javidbakht, Omid and Talwar, Kunal},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {383--392},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/asi21a/asi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/asi21a.html},\n abstract = \t {We study adaptive methods for differentially private convex optimization, proposing and analyzing differentially private variants of a Stochastic Gradient Descent (SGD) algorithm with adaptive stepsizes, as well as the AdaGrad algorithm. We provide upper bounds on the regret of both algorithms and show that the bounds are (worst-case) optimal. As a consequence of our development, we show that our private versions of AdaGrad outperform adaptive SGD, which in turn outperforms traditional SGD in scenarios with non-isotropic gradients where (non-private) Adagrad provably outperforms SGD. The major challenge is that the isotropic noise typically added for privacy dominates the signal in gradient geometry for high-dimensional problems; approaches to this that effectively optimize over lower-dimensional subspaces simply ignore the actual problems that varying gradient geometries introduce. In contrast, we study non-isotropic clipping and noise addition, developing a principled theoretical approach; the consequent procedures also enjoy significantly stronger empirical performance than prior approaches.}\n}", "pdf": "http://proceedings.mlr.press/v139/asi21a/asi21a.pdf", "supp": "", "pdf_size": 2292152, "gs_citation": 69, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17056023877443673441&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": ";;;;", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/asi21a.html" }, { "title": "Private Alternating Least Squares: Practical Private Matrix Completion with Tighter Rates", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10123", "id": "10123", "proceeding": "http://proceedings.mlr.press/v139/chien21a.html", "slides": "", "author_site": "Steve Chien, Prateek Jain, Walid Krichene, Steffen Rendle, Shuang Song, Abhradeep Guha Thakurta, Li Zhang", "author": "Steve Chien; Prateek Jain; Walid Krichene; Steffen Rendle; Shuang Song; Abhradeep Thakurta; Li Zhang", "abstract": "We study the problem of differentially private (DP) matrix completion under user-level privacy. We design a joint differentially private variant of the popular Alternating-Least-Squares (ALS) method that achieves: i) (nearly) optimal sample complexity for matrix completion (in terms of number of items, users), and ii) the best known privacy/utility trade-off both theoretically, as well as on benchmark data sets. 
In particular, we provide the first global convergence analysis of ALS with noise introduced to ensure DP, and show that, in comparison to the best known alternative (the Private Frank-Wolfe algorithm by Jain et al. (2018)), our error bounds scale significantly better with respect to the number of items and users, which is critical in practical problems. Extensive validation on standard benchmarks demonstrate that the algorithm, in combination with carefully designed sampling procedures, is significantly more accurate than existing techniques, thus promising to be the first practical DP embedding model.", "bibtex": "@InProceedings{pmlr-v139-chien21a,\n title = \t {Private Alternating Least Squares: Practical Private Matrix Completion with Tighter Rates},\n author = {Chien, Steve and Jain, Prateek and Krichene, Walid and Rendle, Steffen and Song, Shuang and Thakurta, Abhradeep and Zhang, Li},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1877--1887},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chien21a/chien21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/chien21a.html},\n abstract = \t {We study the problem of differentially private (DP) matrix completion under user-level privacy. We design a joint differentially private variant of the popular Alternating-Least-Squares (ALS) method that achieves: i) (nearly) optimal sample complexity for matrix completion (in terms of number of items, users), and ii) the best known privacy/utility trade-off both theoretically, as well as on benchmark data sets. In particular, we provide the first global convergence analysis of ALS with noise introduced to ensure DP, and show that, in comparison to the best known alternative (the Private Frank-Wolfe algorithm by Jain et al. (2018)), our error bounds scale significantly better with respect to the number of items and users, which is critical in practical problems. 
Extensive validation on standard benchmarks demonstrate that the algorithm, in combination with carefully designed sampling procedures, is significantly more accurate than existing techniques, thus promising to be the first practical DP embedding model.}\n}", "pdf": "http://proceedings.mlr.press/v139/chien21a/chien21a.pdf", "supp": "", "pdf_size": 2739500, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15172906152119984586&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Google Research; Google Research; Google Research; Google Research; Google Research; Google Research; Google Research", "aff_domain": "google.com;google.com;google.com;google.com;google.com;google.com;google.com", "email": "google.com;google.com;google.com;google.com;google.com;google.com;google.com", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/chien21a.html", "aff_unique_index": "0;0;0;0;0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google Research", "aff_campus_unique_index": "0;0;0;0;0;0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Private Stochastic Convex Optimization: Optimal Rates in L1 Geometry", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9989", "id": "9989", "proceeding": "http://proceedings.mlr.press/v139/asi21b.html", "slides": "/media/icml-2021/Slides/9989.pdf", "author_site": "Hilal Asi, Vitaly Feldman, Tomer Koren, Kunal Talwar", "author": "Hilal Asi; Vitaly Feldman; Tomer Koren; Kunal Talwar", "abstract": "Stochastic convex optimization over an $\\ell_1$-bounded domain is ubiquitous in machine learning applications such as LASSO but remains poorly understood when learning with differential privacy. We show that, up to logarithmic factors the optimal excess population loss of any $(\\epsilon,\\delta)$-differentially private optimizer is $\\sqrt{\\log(d)/n} + \\sqrt{d}/\\epsilon n.$ The upper bound is based on a new algorithm that combines the iterative localization approach of Feldman et al. (2020) with a new analysis of private regularized mirror descent. It applies to $\\ell_p$ bounded domains for $p\\in [1,2]$ and queries at most $n^{3/2}$ gradients improving over the best previously known algorithm for the $\\ell_2$ case which needs $n^2$ gradients. Further, we show that when the loss functions satisfy additional smoothness assumptions, the excess loss is upper bounded (up to logarithmic factors) by $\\sqrt{\\log(d)/n} + (\\log(d)/\\epsilon n)^{2/3}.$ This bound is achieved by a new variance-reduced version of the Frank-Wolfe algorithm that requires just a single pass over the data. 
We also show that the lower bound in this case is the minimum of the two rates mentioned above.", "bibtex": "@InProceedings{pmlr-v139-asi21b,\n title = \t {Private Stochastic Convex Optimization: Optimal Rates in L1 Geometry},\n author = {Asi, Hilal and Feldman, Vitaly and Koren, Tomer and Talwar, Kunal},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {393--403},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/asi21b/asi21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/asi21b.html},\n abstract = \t {Stochastic convex optimization over an $\\ell_1$-bounded domain is ubiquitous in machine learning applications such as LASSO but remains poorly understood when learning with differential privacy. We show that, up to logarithmic factors the optimal excess population loss of any $(\\epsilon,\\delta)$-differentially private optimizer is $\\sqrt{\\log(d)/n} + \\sqrt{d}/\\epsilon n.$ The upper bound is based on a new algorithm that combines the iterative localization approach of Feldman et al. (2020) with a new analysis of private regularized mirror descent. It applies to $\\ell_p$ bounded domains for $p\\in [1,2]$ and queries at most $n^{3/2}$ gradients improving over the best previously known algorithm for the $\\ell_2$ case which needs $n^2$ gradients. Further, we show that when the loss functions satisfy additional smoothness assumptions, the excess loss is upper bounded (up to logarithmic factors) by $\\sqrt{\\log(d)/n} + (\\log(d)/\\epsilon n)^{2/3}.$ This bound is achieved by a new variance-reduced version of the Frank-Wolfe algorithm that requires just a single pass over the data. 
We also show that the lower bound in this case is the minimum of the two rates mentioned above.}\n}", "pdf": "http://proceedings.mlr.press/v139/asi21b/asi21b.pdf", "supp": "", "pdf_size": 367295, "gs_citation": 114, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1067227008896338032&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Electrical Engineering, Stanford University; Apple + Blavatnik School of Computer Science, Tel Aviv University, and Google Research Tel Aviv; Blavatnik School of Computer Science, Tel Aviv University, and Google Research Tel Aviv; Apple", "aff_domain": "stanford.edu; ; ; ", "email": "stanford.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/asi21b.html", "aff_unique_index": "0;1+2;2;1", "aff_unique_norm": "Stanford University;Apple;Tel Aviv University", "aff_unique_dep": "Department of Electrical Engineering;Apple Inc.;Blavatnik School of Computer Science", "aff_unique_url": "https://www.stanford.edu;https://www.apple.com;https://www.tau.ac.il", "aff_unique_abbr": "Stanford;Apple;TAU", "aff_campus_unique_index": "0;2;2", "aff_campus_unique": "Stanford;;Tel Aviv", "aff_country_unique_index": "0;0+1;1;0", "aff_country_unique": "United States;Israel" }, { "title": "ProGraML: A Graph-based Program Representation for Data Flow Analysis and Compiler Optimizations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8885", "id": "8885", "proceeding": "http://proceedings.mlr.press/v139/cummins21a.html", "slides": "", "author_site": "Chris Cummins, Zacharias Fisches, Tal Ben-Nun, Torsten Hoefler, Michael O'Boyle, Hugh Leather", "author": "Chris Cummins; Zacharias V. Fisches; Tal Ben-Nun; Torsten Hoefler; Michael F P O\u2019Boyle; Hugh Leather", "abstract": "Machine learning (ML) is increasingly seen as a viable approach for building compiler optimization heuristics, but many ML methods cannot replicate even the simplest of the data flow analyses that are critical to making good optimization decisions. We posit that if ML cannot do that, then it is insufficiently able to reason about programs. We formulate data flow analyses as supervised learning tasks and introduce a large open dataset of programs and their corresponding labels from several analyses. We use this dataset to benchmark ML methods and show that they struggle on these fundamental program reasoning tasks. We propose ProGraML - Program Graphs for Machine Learning - a language-independent, portable representation of program semantics. ProGraML overcomes the limitations of prior works and yields improved performance on downstream optimization tasks.", "bibtex": "@InProceedings{pmlr-v139-cummins21a,\n title = \t {ProGraML: A Graph-based Program Representation for Data Flow Analysis and Compiler Optimizations},\n author = {Cummins, Chris and Fisches, Zacharias V. 
and Ben-Nun, Tal and Hoefler, Torsten and O'Boyle, Michael F P and Leather, Hugh},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2244--2253},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cummins21a/cummins21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/cummins21a.html},\n abstract = \t {Machine learning (ML) is increasingly seen as a viable approach for building compiler optimization heuristics, but many ML methods cannot replicate even the simplest of the data flow analyses that are critical to making good optimization decisions. We posit that if ML cannot do that, then it is insufficiently able to reason about programs. We formulate data flow analyses as supervised learning tasks and introduce a large open dataset of programs and their corresponding labels from several analyses. We use this dataset to benchmark ML methods and show that they struggle on these fundamental program reasoning tasks. We propose ProGraML - Program Graphs for Machine Learning - a language-independent, portable representation of program semantics. ProGraML overcomes the limitations of prior works and yields improved performance on downstream optimization tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/cummins21a/cummins21a.pdf", "supp": "", "pdf_size": 504999, "gs_citation": 142, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15292684952528924409&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 23, "aff": "Facebook AI Research, USA+ETH Z\u00fcrich, Switzerland; ETH Z\u00fcrich, Switzerland; ETH Z\u00fcrich, Switzerland; ETH Z\u00fcrich, Switzerland; University of Edinburgh, United Kingdom; Facebook AI Research, USA", "aff_domain": "fb.com; ; ; ; ; ", "email": "fb.com; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/cummins21a.html", "aff_unique_index": "0+1;1;1;1;2;0", "aff_unique_norm": "Meta;ETH Zurich;University of Edinburgh", "aff_unique_dep": "Facebook AI Research;;", "aff_unique_url": "https://research.facebook.com;https://www.ethz.ch;https://www.ed.ac.uk", "aff_unique_abbr": "FAIR;ETHZ;Edinburgh", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+1;1;1;1;2;0", "aff_country_unique": "United States;Switzerland;United Kingdom" }, { "title": "Probabilistic Generating Circuits", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10459", "id": "10459", "proceeding": "http://proceedings.mlr.press/v139/zhang21i.html", "slides": "", "author_site": "Honghua Zhang, Brendan Juba, Guy Van den Broeck", "author": "Honghua Zhang; Brendan Juba; Guy Van Den Broeck", "abstract": "Generating functions, which are widely used in combinatorics and probability theory, encode function values into the coefficients of a polynomial. In this paper, we explore their use as a tractable probabilistic model, and propose probabilistic generating circuits (PGCs) for their efficient representation. PGCs are strictly more expressive efficient than many existing tractable probabilistic models, including determinantal point processes (DPPs), probabilistic circuits (PCs) such as sum-product networks, and tractable graphical models. 
We contend that PGCs are not just a theoretical framework that unifies vastly different existing models, but also show great potential in modeling realistic data. We exhibit a simple class of PGCs that are not trivially subsumed by simple combinations of PCs and DPPs, and obtain competitive performance on a suite of density estimation benchmarks. We also highlight PGCs\u2019 connection to the theory of strongly Rayleigh distributions.", "bibtex": "@InProceedings{pmlr-v139-zhang21i,\n title = \t {Probabilistic Generating Circuits},\n author = {Zhang, Honghua and Juba, Brendan and Van Den Broeck, Guy},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12447--12457},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21i/zhang21i.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21i.html},\n abstract = \t {Generating functions, which are widely used in combinatorics and probability theory, encode function values into the coefficients of a polynomial. In this paper, we explore their use as a tractable probabilistic model, and propose probabilistic generating circuits (PGCs) for their efficient representation. PGCs are strictly more expressive efficient than many existing tractable probabilistic models, including determinantal point processes (DPPs), probabilistic circuits (PCs) such as sum-product networks, and tractable graphical models. We contend that PGCs are not just a theoretical framework that unifies vastly different existing models, but also show great potential in modeling realistic data. We exhibit a simple class of PGCs that are not trivially subsumed by simple combinations of PCs and DPPs, and obtain competitive performance on a suite of density estimation benchmarks. We also highlight PGCs\u2019 connection to the theory of strongly Rayleigh distributions.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21i/zhang21i.pdf", "supp": "", "pdf_size": 1191403, "gs_citation": 36, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17862854741765705312&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Computer Science Department, University of California Los Angeles, USA+1; Computer Science Department, Washington University in St. Louis, Missouri, USA+2; Computer Science Department, University of California Los Angeles, USA+1", "aff_domain": "cs.ucla.edu; ; ", "email": "cs.ucla.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zhang21i.html", "aff_unique_index": "0;2;0", "aff_unique_norm": "University of California, Los Angeles;;Washington University in St. Louis", "aff_unique_dep": "Computer Science Department;;Computer Science Department", "aff_unique_url": "https://www.ucla.edu;;https://wustl.edu", "aff_unique_abbr": "UCLA;;WUSTL", "aff_campus_unique_index": "0;2;0", "aff_campus_unique": "Los Angeles;;St. 
Louis", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States;" }, { "title": "Probabilistic Programs with Stochastic Conditioning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8607", "id": "8607", "proceeding": "http://proceedings.mlr.press/v139/tolpin21a.html", "slides": "/media/icml-2021/Slides/8607.pdf", "author_site": "David Tolpin, Yuan Zhou, Tom Rainforth, Hongseok Yang", "author": "David Tolpin; Yuan Zhou; Tom Rainforth; Hongseok Yang", "abstract": "We tackle the problem of conditioning probabilistic programs on distributions of observable variables. Probabilistic programs are usually conditioned on samples from the joint data distribution, which we refer to as deterministic conditioning. However, in many real-life scenarios, the observations are given as marginal distributions, summary statistics, or samplers. Conventional probabilistic programming systems lack adequate means for modeling and inference in such scenarios. We propose a generalization of deterministic conditioning to stochastic conditioning, that is, conditioning on the marginal distribution of a variable taking a particular form. To this end, we first define the formal notion of stochastic conditioning and discuss its key properties. We then show how to perform inference in the presence of stochastic conditioning. We demonstrate potential usage of stochastic conditioning on several case studies which involve various kinds of stochastic conditioning and are difficult to solve otherwise. Although we present stochastic conditioning in the context of probabilistic programming, our formalization is general and applicable to other settings.", "bibtex": "@InProceedings{pmlr-v139-tolpin21a,\n title = \t {Probabilistic Programs with Stochastic Conditioning},\n author = {Tolpin, David and Zhou, Yuan and Rainforth, Tom and Yang, Hongseok},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10312--10323},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tolpin21a/tolpin21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/tolpin21a.html},\n abstract = \t {We tackle the problem of conditioning probabilistic programs on distributions of observable variables. Probabilistic programs are usually conditioned on samples from the joint data distribution, which we refer to as deterministic conditioning. However, in many real-life scenarios, the observations are given as marginal distributions, summary statistics, or samplers. Conventional probabilistic programming systems lack adequate means for modeling and inference in such scenarios. We propose a generalization of deterministic conditioning to stochastic conditioning, that is, conditioning on the marginal distribution of a variable taking a particular form. To this end, we first define the formal notion of stochastic conditioning and discuss its key properties. We then show how to perform inference in the presence of stochastic conditioning. We demonstrate potential usage of stochastic conditioning on several case studies which involve various kinds of stochastic conditioning and are difficult to solve otherwise. 
Although we present stochastic conditioning in the context of probabilistic programming, our formalization is general and applicable to other settings.}\n}", "pdf": "http://proceedings.mlr.press/v139/tolpin21a/tolpin21a.pdf", "supp": "", "pdf_size": 1916437, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1683743898949838674&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Ben-Gurion University of the Negev; Artificial Intelligence Research Center, DII; University of Oxford; School of Computing, KAIST", "aff_domain": "gmail.com; ; ; ", "email": "gmail.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/tolpin21a.html", "aff_unique_index": "0;1;2;3", "aff_unique_norm": "Ben-Gurion University of the Negev;DII;University of Oxford;KAIST", "aff_unique_dep": ";Artificial Intelligence Research Center;;School of Computing", "aff_unique_url": "https://www.bgu.ac.il;;https://www.ox.ac.uk;https://www.kaist.ac.kr", "aff_unique_abbr": "BGU;DII;Oxford;KAIST", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;2;3", "aff_country_unique": "Israel;;United Kingdom;South Korea" }, { "title": "Probabilistic Sequential Shrinking: A Best Arm Identification Algorithm for Stochastic Bandits with Corruptions", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10685", "id": "10685", "proceeding": "http://proceedings.mlr.press/v139/zhong21a.html", "slides": "", "author_site": "Zixin Zhong, Wang Chi Cheung, Vincent Tan", "author": "Zixin Zhong; Wang Chi Cheung; Vincent Tan", "abstract": "We consider a best arm identification (BAI) problem for stochastic bandits with adversarial corruptions in the fixed-budget setting of T steps. We design a novel randomized algorithm, Probabilistic Sequential Shrinking(u) (PSS(u)), which is agnostic to the amount of corruptions. When the amount of corruptions per step (CPS) is below a threshold, PSS(u) identifies the best arm or item with probability tending to 1 as $T \\rightarrow \\infty$. Otherwise, the optimality gap of the identified item degrades gracefully with the CPS. We argue that such a bifurcation is necessary. In PSS(u), the parameter u serves to balance between the optimality gap and success probability. The injection of randomization is shown to be essential to mitigate the impact of corruptions. To demonstrate this, we design two attack strategies that are applicable to any algorithm. We apply one of them to a deterministic analogue of PSS(u) known as Successive Halving (SH) by Karnin et al. (2013). The attack strategy results in a high failure probability for SH, but PSS(u) remains robust. In the absence of corruptions, PSS(2)\u2019s performance guarantee matches SH\u2019s. We show that when the CPS is sufficiently large, no algorithm can achieve a BAI probability tending to 1 as $T \\rightarrow \\infty$. 
Numerical experiments corroborate our theoretical findings.", "bibtex": "@InProceedings{pmlr-v139-zhong21a,\n title = \t {Probabilistic Sequential Shrinking: A Best Arm Identification Algorithm for Stochastic Bandits with Corruptions},\n author = {Zhong, Zixin and Cheung, Wang Chi and Tan, Vincent},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12772--12781},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhong21a/zhong21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhong21a.html},\n abstract = \t {We consider a best arm identification (BAI) problem for stochastic bandits with adversarial corruptions in the fixed-budget setting of T steps. We design a novel randomized algorithm, Probabilistic Sequential Shrinking(u) (PSS(u)), which is agnostic to the amount of corruptions. When the amount of corruptions per step (CPS) is below a threshold, PSS(u) identifies the best arm or item with probability tending to 1 as T{\\rightarrow}$\\infty$. Otherwise, the optimality gap of the identified item degrades gracefully with the CPS.We argue that such a bifurcation is necessary. In PSS(u), the parameter u serves to balance between the optimality gap and success probability. The injection of randomization is shown to be essential to mitigate the impact of corruptions. To demonstrate this, we design two attack strategies that are applicable to any algorithm. We apply one of them to a deterministic analogue of PSS(u) known as Successive Halving (SH) by Karnin et al. (2013). The attack strategy results in a high failure probability for SH, but PSS(u) remains robust. In the absence of corruptions, PSS(2)\u2019s performance guarantee matches SH\u2019s. We show that when the CPS is sufficiently large, no algorithm can achieve a BAI probability tending to 1 as T{\\rightarrow}$\\infty$. 
Numerical experiments corroborate our theoretical findings.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhong21a/zhong21a.pdf", "supp": "", "pdf_size": 481412, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17868833179563071427&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 6, "aff": "Department of Mathematics, National University of Singapore, Singapore + Institute of Operations Research and Analytics, National University of Singapore, Singapore + Department of Electrical and Computer Engineering, National University of Singapore, Singapore; Department of Industrial Systems and Management, National University of Singapore, Singapore + Institute of Operations Research and Analytics, National University of Singapore, Singapore + Department of Electrical and Computer Engineering, National University of Singapore, Singapore; Department of Mathematics, National University of Singapore, Singapore + Institute of Operations Research and Analytics, National University of Singapore, Singapore + Department of Electrical and Computer Engineering, National University of Singapore, Singapore", "aff_domain": "u.nus.edu;nus.edu.sg;nus.edu.sg", "email": "u.nus.edu;nus.edu.sg;nus.edu.sg", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zhong21a.html", "aff_unique_index": "0+0+0;0+0+0;0+0+0", "aff_unique_norm": "National University of Singapore", "aff_unique_dep": "Department of Mathematics", "aff_unique_url": "https://www.nus.edu.sg", "aff_unique_abbr": "NUS", "aff_campus_unique_index": ";;", "aff_campus_unique": "", "aff_country_unique_index": "0+0+0;0+0+0;0+0+0", "aff_country_unique": "Singapore" }, { "title": "Problem Dependent View on Structured Thresholding Bandit Problems", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10401", "id": "10401", "proceeding": "http://proceedings.mlr.press/v139/cheshire21a.html", "slides": "", "author_site": "James Cheshire, Pierre Menard, Alexandra Carpentier", "author": "James Cheshire; Pierre Menard; Alexandra Carpentier", "abstract": "We investigate the \\textit{problem dependent regime} in the stochastic \\emph{Thresholding Bandit problem} (\\tbp) under several \\emph{shape constraints}. In the \\tbp the objective of the learner is to output, after interacting with the environment, the set of arms whose means are above a given threshold. The vanilla, unstructured, case is already well studied in the literature. Taking $K$ as the number of arms, we consider the case where (i) the sequence of arm\u2019s means $(\\mu_k)_{k=1}^K$ is monotonically increasing (\\textit{MTBP}) and (ii) the case where $(\\mu_k)_{k=1}^K$ is concave (\\textit{CTBP}). We consider both cases in the \\emph{problem dependent} regime and study the probability of error - i.e.\u00a0the probability to mis-classify at least one arm. In the fixed budget setting, we provide nearly matching upper and lower bounds for the probability of error in both the concave and monotone settings, as well as associated algorithms. 
Of interest, is that for both the monotone and concave cases, optimal bounds on probability of error are of the same order as those for the two armed bandit problem.", "bibtex": "@InProceedings{pmlr-v139-cheshire21a,\n title = \t {Problem Dependent View on Structured Thresholding Bandit Problems},\n author = {Cheshire, James and Menard, Pierre and Carpentier, Alexandra},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1846--1854},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cheshire21a/cheshire21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/cheshire21a.html},\n abstract = \t {We investigate the \\textit{problem dependent regime} in the stochastic \\emph{Thresholding Bandit problem} (\\tbp) under several \\emph{shape constraints}. In the \\tbp the objective of the learner is to output, after interacting with the environment, the set of arms whose means are above a given threshold. The vanilla, unstructured, case is already well studied in the literature. Taking $K$ as the number of arms, we consider the case where (i) the sequence of arm\u2019s means $(\\mu_k)_{k=1}^K$ is monotonically increasing (\\textit{MTBP}) and (ii) the case where $(\\mu_k)_{k=1}^K$ is concave (\\textit{CTBP}). We consider both cases in the \\emph{problem dependent} regime and study the probability of error - i.e.\u00a0the probability to mis-classify at least one arm. In the fixed budget setting, we provide nearly matching upper and lower bounds for the probability of error in both the concave and monotone settings, as well as associated algorithms. Of interest, is that for both the monotone and concave cases, optimal bounds on probability of error are of the same order as those for the two armed bandit problem.}\n}", "pdf": "http://proceedings.mlr.press/v139/cheshire21a/cheshire21a.pdf", "supp": "", "pdf_size": 368423, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4601702412136024909&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 4, "aff": "Otto von Guericke University Magdeburg; Otto von Guericke University Magdeburg; Otto von Guericke University Magdeburg", "aff_domain": "ovgu.de; ; ", "email": "ovgu.de; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/cheshire21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Otto von Guericke University", "aff_unique_dep": "", "aff_unique_url": "https://www.ovgu.de", "aff_unique_abbr": "OVGU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Magdeburg", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Germany" }, { "title": "Progressive-Scale Boundary Blackbox Attack via Projective Gradient Estimation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9235", "id": "9235", "proceeding": "http://proceedings.mlr.press/v139/zhang21l.html", "slides": "/media/icml-2021/Slides/9235.pdf", "author_site": "Jiawei Zhang, Linyi Li, Huichen Li, Xiaolu Zhang, Shuang Yang, Bo Li", "author": "Jiawei Zhang; Linyi Li; Huichen Li; Xiaolu Zhang; Shuang Yang; Bo Li", "abstract": "Boundary based blackbox attack has been recognized as practical and effective, given that an attacker only needs to access the final model prediction. 
However, the query efficiency of it is in general high especially for high dimensional image data. In this paper, we show that such efficiency highly depends on the scale at which the attack is applied, and attacking at the optimal scale significantly improves the efficiency. In particular, we propose a theoretical framework to analyze and show three key characteristics to improve the query efficiency. We prove that there exists an optimal scale for projective gradient estimation. Our framework also explains the satisfactory performance achieved by existing boundary black-box attacks. Based on our theoretical framework, we propose Progressive-Scale enabled projective Boundary Attack (PSBA) to improve the query efficiency via progressive scaling techniques. In particular, we employ Progressive-GAN to optimize the scale of projections, which we call PSBA-PGAN. We evaluate our approach on both spatial and frequency scales. Extensive experiments on MNIST, CIFAR-10, CelebA, and ImageNet against different models including a real-world face recognition API show that PSBA-PGAN significantly outperforms existing baseline attacks in terms of query efficiency and attack success rate. We also observe relatively stable optimal scales for different models and datasets. The code is publicly available at https://github.com/AI-secure/PSBA.", "bibtex": "@InProceedings{pmlr-v139-zhang21l,\n title = \t {Progressive-Scale Boundary Blackbox Attack via Projective Gradient Estimation},\n author = {Zhang, Jiawei and Li, Linyi and Li, Huichen and Zhang, Xiaolu and Yang, Shuang and Li, Bo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12479--12490},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21l/zhang21l.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21l.html},\n abstract = \t {Boundary based blackbox attack has been recognized as practical and effective, given that an attacker only needs to access the final model prediction. However, the query efficiency of it is in general high especially for high dimensional image data. In this paper, we show that such efficiency highly depends on the scale at which the attack is applied, and attacking at the optimal scale significantly improves the efficiency. In particular, we propose a theoretical framework to analyze and show three key characteristics to improve the query efficiency. We prove that there exists an optimal scale for projective gradient estimation. Our framework also explains the satisfactory performance achieved by existing boundary black-box attacks. Based on our theoretical framework, we propose Progressive-Scale enabled projective Boundary Attack (PSBA) to improve the query efficiency via progressive scaling techniques. In particular, we employ Progressive-GAN to optimize the scale of projections, which we call PSBA-PGAN. We evaluate our approach on both spatial and frequency scales. Extensive experiments on MNIST, CIFAR-10, CelebA, and ImageNet against different models including a real-world face recognition API show that PSBA-PGAN significantly outperforms existing baseline attacks in terms of query efficiency and attack success rate. We also observe relatively stable optimal scales for different models and datasets. 
The code is publicly available at https://github.com/AI-secure/PSBA.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21l/zhang21l.pdf", "supp": "", "pdf_size": 2076207, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2561734592069193549&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": ";;;;;", "aff_domain": ";;;;;", "email": ";;;;;", "github": "https://github.com/AI-secure/PSBA", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/zhang21l.html" }, { "title": "Projection Robust Wasserstein Barycenters", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10607", "id": "10607", "proceeding": "http://proceedings.mlr.press/v139/huang21f.html", "slides": "/media/icml-2021/Slides/10607.pdf", "author_site": "Minhui Huang, Shiqian Ma, Lifeng Lai", "author": "Minhui Huang; Shiqian Ma; Lifeng Lai", "abstract": "Collecting and aggregating information from several probability measures or histograms is a fundamental task in machine learning. One of the popular solution methods for this task is to compute the barycenter of the probability measures under the Wasserstein metric. However, approximating the Wasserstein barycenter is numerically challenging because of the curse of dimensionality. This paper proposes the projection robust Wasserstein barycenter (PRWB) that has the potential to mitigate the curse of dimensionality, and a relaxed PRWB (RPRWB) model that is computationally more tractable. By combining the iterative Bregman projection algorithm and Riemannian optimization, we propose two algorithms for computing the RPRWB, which is a max-min problem over the Stiefel manifold. The complexity of arithmetic operations of the proposed algorithms for obtaining an $\\epsilon$-stationary solution is analyzed. We incorporate the RPRWB into a discrete distribution clustering algorithm, and the numerical results on real text datasets confirm that our RPRWB model helps improve the clustering performance significantly.", "bibtex": "@InProceedings{pmlr-v139-huang21f,\n title = \t {Projection Robust Wasserstein Barycenters},\n author = {Huang, Minhui and Ma, Shiqian and Lai, Lifeng},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4456--4465},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/huang21f/huang21f.pdf},\n url = \t {https://proceedings.mlr.press/v139/huang21f.html},\n abstract = \t {Collecting and aggregating information from several probability measures or histograms is a fundamental task in machine learning. One of the popular solution methods for this task is to compute the barycenter of the probability measures under the Wasserstein metric. However, approximating the Wasserstein barycenter is numerically challenging because of the curse of dimensionality. This paper proposes the projection robust Wasserstein barycenter (PRWB) that has the potential to mitigate the curse of dimensionality, and a relaxed PRWB (RPRWB) model that is computationally more tractable. By combining the iterative Bregman projection algorithm and Riemannian optimization, we propose two algorithms for computing the RPRWB, which is a max-min problem over the Stiefel manifold. 
The complexity of arithmetic operations of the proposed algorithms for obtaining an $\\epsilon$-stationary solution is analyzed. We incorporate the RPRWB into a discrete distribution clustering algorithm, and the numerical results on real text datasets confirm that our RPRWB model helps improve the clustering performance significantly.}\n}", "pdf": "http://proceedings.mlr.press/v139/huang21f/huang21f.pdf", "supp": "", "pdf_size": 767775, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15443830081119328709&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Electrical and Computer Engineering, University of California, Davis, CA, USA; Department of Mathematics, University of California, Davis, CA, USA; Department of Electrical and Computer Engineering, University of California, Davis, CA, USA", "aff_domain": "ucdavis.edu;ucdavis.edu;ucdavis.edu", "email": "ucdavis.edu;ucdavis.edu;ucdavis.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/huang21f.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, Davis", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.ucdavis.edu", "aff_unique_abbr": "UC Davis", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Davis", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Projection techniques to update the truncated SVD of evolving matrices with applications", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10159", "id": "10159", "proceeding": "http://proceedings.mlr.press/v139/kalantzis21a.html", "slides": "", "author_site": "Vasileios Kalantzis, Georgios Kollias, Shashanka Ubaru, Athanasios N. Nikolakopoulos, Lior Horesh, Kenneth Clarkson", "author": "Vasileios Kalantzis; Georgios Kollias; Shashanka Ubaru; Athanasios N. Nikolakopoulos; Lior Horesh; Kenneth Clarkson", "abstract": "This submission considers the problem of updating the rank-$k$ truncated Singular Value Decomposition (SVD) of matrices subject to the addition of new rows and/or columns over time. Such matrix problems represent an important computational kernel in applications such as Latent Semantic Indexing and Recommender Systems. Nonetheless, the proposed framework is purely algebraic and targets general updating problems. The algorithm presented in this paper undertakes a projection viewpoint and focuses on building a pair of subspaces which approximate the linear span of the sought singular vectors of the updated matrix. We discuss and analyze two different choices to form the projection subspaces. Results on matrices from real applications suggest that the proposed algorithm can lead to higher accuracy, especially for the singular triplets associated with the largest modulus singular values. Several practical details and key differences with other approaches are also discussed.", "bibtex": "@InProceedings{pmlr-v139-kalantzis21a,\n title = \t {Projection techniques to update the truncated SVD of evolving matrices with applications},\n author = {Kalantzis, Vasileios and Kollias, Georgios and Ubaru, Shashanka and Nikolakopoulos, Athanasios N. 
and Horesh, Lior and Clarkson, Kenneth},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5236--5246},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kalantzis21a/kalantzis21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kalantzis21a.html},\n abstract = \t {This submission considers the problem of updating the rank-$k$ truncated Singular Value Decomposition (SVD) of matrices subject to the addition of new rows and/or columns over time. Such matrix problems represent an important computational kernel in applications such as Latent Semantic Indexing and Recommender Systems. Nonetheless, the proposed framework is purely algebraic and targets general updating problems. The algorithm presented in this paper undertakes a projection viewpoint and focuses on building a pair of subspaces which approximate the linear span of the sought singular vectors of the updated matrix. We discuss and analyze two different choices to form the projection subspaces. Results on matrices from real applications suggest that the proposed algorithm can lead to higher accuracy, especially for the singular triplets associated with the largest modulus singular values. Several practical details and key differences with other approaches are also discussed.}\n}", "pdf": "http://proceedings.mlr.press/v139/kalantzis21a/kalantzis21a.pdf", "supp": "", "pdf_size": 904740, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7118024440500190867&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "IBM Research, Thomas J. Watson Research Center, Yorktown Heights, NY 10598; IBM Research, Thomas J. Watson Research Center, Yorktown Heights, NY 10598; IBM Research, Thomas J. Watson Research Center, Yorktown Heights, NY 10598; Amazon, 550 Terry Ave N, Seattle, WA 98109 (work done prior to joining Amazon); IBM Research, Thomas J. Watson Research Center, Yorktown Heights, NY 10598; IBM Research, Almaden Research Center, San Jose, CA 95120", "aff_domain": "ibm.com; ; ; ; ; ", "email": "ibm.com; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/kalantzis21a.html", "aff_unique_index": "0;0;0;1;0;0", "aff_unique_norm": "IBM;Amazon", "aff_unique_dep": "IBM Research;Amazon", "aff_unique_url": "https://www.ibm.com/research;https://www.amazon.com", "aff_unique_abbr": "IBM;Amazon", "aff_campus_unique_index": "0;0;0;1;0;2", "aff_campus_unique": "Yorktown Heights;Seattle;Almaden Research Center", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Provable Generalization of SGD-trained Neural Networks of Any Width in the Presence of Adversarial Label Noise", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10399", "id": "10399", "proceeding": "http://proceedings.mlr.press/v139/frei21b.html", "slides": "/media/icml-2021/Slides/10399.pdf", "author_site": "Spencer Frei, Yuan Cao, Quanquan Gu", "author": "Spencer Frei; Yuan Cao; Quanquan Gu", "abstract": "We consider a one-hidden-layer leaky ReLU network of arbitrary width trained by stochastic gradient descent (SGD) following an arbitrary initialization. 
We prove that SGD produces neural networks that have classification accuracy competitive with that of the best halfspace over the distribution for a broad class of distributions that includes log-concave isotropic and hard margin distributions. Equivalently, such networks can generalize when the data distribution is linearly separable but corrupted with adversarial label noise, despite the capacity to overfit. To the best of our knowledge, this is the first work to show that overparameterized neural networks trained by SGD can generalize when the data is corrupted with adversarial label noise.", "bibtex": "@InProceedings{pmlr-v139-frei21b,\n title = \t {Provable Generalization of SGD-trained Neural Networks of Any Width in the Presence of Adversarial Label Noise},\n author = {Frei, Spencer and Cao, Yuan and Gu, Quanquan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3427--3438},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/frei21b/frei21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/frei21b.html},\n abstract = \t {We consider a one-hidden-layer leaky ReLU network of arbitrary width trained by stochastic gradient descent (SGD) following an arbitrary initialization. We prove that SGD produces neural networks that have classification accuracy competitive with that of the best halfspace over the distribution for a broad class of distributions that includes log-concave isotropic and hard margin distributions. Equivalently, such networks can generalize when the data distribution is linearly separable but corrupted with adversarial label noise, despite the capacity to overfit. To the best of our knowledge, this is the first work to show that overparameterized neural networks trained by SGD can generalize when the data is corrupted with adversarial label noise.}\n}", "pdf": "http://proceedings.mlr.press/v139/frei21b/frei21b.pdf", "supp": "", "pdf_size": 466059, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10029653979209669660&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Statistics, UCLA; Department of Computer Science, UCLA; Department of Computer Science, UCLA", "aff_domain": "stats.ucla.edu;cs.ucla.edu;cs.ucla.edu", "email": "stats.ucla.edu;cs.ucla.edu;cs.ucla.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/frei21b.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, Los Angeles", "aff_unique_dep": "Department of Statistics", "aff_unique_url": "https://www.ucla.edu", "aff_unique_abbr": "UCLA", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Los Angeles", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Provable Lipschitz Certification for Generative Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9171", "id": "9171", "proceeding": "http://proceedings.mlr.press/v139/jordan21a.html", "slides": "", "author_site": "Matt Jordan, Alexandros Dimakis", "author": "Matt Jordan; Alex Dimakis", "abstract": "We present a scalable technique for upper bounding the Lipschitz constant of generative models. 
We relate this quantity to the maximal norm over the set of attainable vector-Jacobian products of a given generative model. We approximate this set by layerwise convex approximations using zonotopes. Our approach generalizes and improves upon prior work using zonotope transformers and we extend to Lipschitz estimation of neural networks with large output dimension. This provides efficient and tight bounds on small networks and can scale to generative models on VAE and DCGAN architectures.", "bibtex": "@InProceedings{pmlr-v139-jordan21a,\n title = \t {Provable Lipschitz Certification for Generative Models},\n author = {Jordan, Matt and Dimakis, Alex},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5118--5126},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jordan21a/jordan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jordan21a.html},\n abstract = \t {We present a scalable technique for upper bounding the Lipschitz constant of generative models. We relate this quantity to the maximal norm over the set of attainable vector-Jacobian products of a given generative model. We approximate this set by layerwise convex approximations using zonotopes. Our approach generalizes and improves upon prior work using zonotope transformers and we extend to Lipschitz estimation of neural networks with large output dimension. This provides efficient and tight bounds on small networks and can scale to generative models on VAE and DCGAN architectures.}\n}", "pdf": "http://proceedings.mlr.press/v139/jordan21a/jordan21a.pdf", "supp": "", "pdf_size": 515467, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12680803124320000894&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "University of Texas at Austin; University of Texas at Austin", "aff_domain": "cs.utexas.edu; ", "email": "cs.utexas.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/jordan21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Texas at Austin", "aff_unique_dep": "", "aff_unique_url": "https://www.utexas.edu", "aff_unique_abbr": "UT Austin", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Austin", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Provable Meta-Learning of Linear Representations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9333", "id": "9333", "proceeding": "http://proceedings.mlr.press/v139/tripuraneni21a.html", "slides": "", "author_site": "Nilesh Tripuraneni, Chi Jin, Michael Jordan", "author": "Nilesh Tripuraneni; Chi Jin; Michael Jordan", "abstract": "Meta-learning, or learning-to-learn, seeks to design algorithms that can utilize previous experience to rapidly learn new skills or adapt to new environments. Representation learning\u2014a key tool for performing meta-learning\u2014learns a data representation that can transfer knowledge across multiple tasks, which is essential in regimes where data is scarce. Despite a recent surge of interest in the practice of meta-learning, the theoretical underpinnings of meta-learning algorithms are lacking, especially in the context of learning transferable representations. 
In this paper, we focus on the problem of multi-task linear regression\u2014in which multiple linear regression models share a common, low-dimensional linear representation. Here, we provide provably fast, sample-efficient algorithms to address the dual challenges of (1) learning a common set of features from multiple, related tasks, and (2) transferring this knowledge to new, unseen tasks. Both are central to the general problem of meta-learning. Finally, we complement these results by providing information-theoretic lower bounds on the sample complexity of learning these linear features.", "bibtex": "@InProceedings{pmlr-v139-tripuraneni21a,\n title = \t {Provable Meta-Learning of Linear Representations},\n author = {Tripuraneni, Nilesh and Jin, Chi and Jordan, Michael},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10434--10443},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tripuraneni21a/tripuraneni21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/tripuraneni21a.html},\n abstract = \t {Meta-learning, or learning-to-learn, seeks to design algorithms that can utilize previous experience to rapidly learn new skills or adapt to new environments. Representation learning\u2014a key tool for performing meta-learning\u2014learns a data representation that can transfer knowledge across multiple tasks, which is essential in regimes where data is scarce. Despite a recent surge of interest in the practice of meta-learning, the theoretical underpinnings of meta-learning algorithms are lacking, especially in the context of learning transferable representations. In this paper, we focus on the problem of multi-task linear regression\u2014in which multiple linear regression models share a common, low-dimensional linear representation. Here, we provide provably fast, sample-efficient algorithms to address the dual challenges of (1) learning a common set of features from multiple, related tasks, and (2) transferring this knowledge to new, unseen tasks. Both are central to the general problem of meta-learning. 
Finally, we complement these results by providing information-theoretic lower bounds on the sample complexity of learning these linear features.}\n}", "pdf": "http://proceedings.mlr.press/v139/tripuraneni21a/tripuraneni21a.pdf", "supp": "", "pdf_size": 569631, "gs_citation": 231, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14454744225976907789&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of EECS, University of California, Berkeley; Department of Electrical Engineering, Princeton University; Department of EECS, University of California, Berkeley", "aff_domain": "berkeley.edu; ; ", "email": "berkeley.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/tripuraneni21a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of California, Berkeley;Princeton University", "aff_unique_dep": "Department of Electrical Engineering and Computer Sciences;Department of Electrical Engineering", "aff_unique_url": "https://www.berkeley.edu;https://www.princeton.edu", "aff_unique_abbr": "UC Berkeley;Princeton", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Provable Robustness of Adversarial Training for Learning Halfspaces with Noise", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8973", "id": "8973", "proceeding": "http://proceedings.mlr.press/v139/zou21a.html", "slides": "", "author_site": "Difan Zou, Spencer Frei, Quanquan Gu", "author": "Difan Zou; Spencer Frei; Quanquan Gu", "abstract": "We analyze the properties of adversarial training for learning adversarially robust halfspaces in the presence of agnostic label noise. Denoting $\\mathsf{OPT}_{p,r}$ as the best classification error achieved by a halfspace that is robust to perturbations of $\\ell^{p}$ balls of radius $r$, we show that adversarial training on the standard binary cross-entropy loss yields adversarially robust halfspaces up to classification error $\\tilde O(\\sqrt{\\mathsf{OPT}_{2,r}})$ for $p=2$, and $\\tilde O(d^{1/4} \\sqrt{\\mathsf{OPT}_{\\infty, r}})$ when $p=\\infty$. Our results hold for distributions satisfying anti-concentration properties enjoyed by log-concave isotropic distributions among others. We additionally show that if one instead uses a non-convex sigmoidal loss, adversarial training yields halfspaces with an improved robust classification error of $O(\\mathsf{OPT}_{2,r})$ for $p=2$, and $O(d^{1/4} \\mathsf{OPT}_{\\infty, r})$ when $p=\\infty$. 
To the best of our knowledge, this is the first work showing that adversarial training provably yields robust classifiers in the presence of noise.", "bibtex": "@InProceedings{pmlr-v139-zou21a,\n title = \t {Provable Robustness of Adversarial Training for Learning Halfspaces with Noise},\n author = {Zou, Difan and Frei, Spencer and Gu, Quanquan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {13002--13011},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zou21a/zou21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zou21a.html},\n abstract = \t {We analyze the properties of adversarial training for learning adversarially robust halfspaces in the presence of agnostic label noise. Denoting $\\mathsf{OPT}_{p,r}$ as the best classification error achieved by a halfspace that is robust to perturbations of $\\ell^{p}$ balls of radius $r$, we show that adversarial training on the standard binary cross-entropy loss yields adversarially robust halfspaces up to classification error $\\tilde O(\\sqrt{\\mathsf{OPT}_{2,r}})$ for $p=2$, and $\\tilde O(d^{1/4} \\sqrt{\\mathsf{OPT}_{\\infty, r}})$ when $p=\\infty$. Our results hold for distributions satisfying anti-concentration properties enjoyed by log-concave isotropic distributions among others. We additionally show that if one instead uses a non-convex sigmoidal loss, adversarial training yields halfspaces with an improved robust classification error of $O(\\mathsf{OPT}_{2,r})$ for $p=2$, and $O(d^{1/4} \\mathsf{OPT}_{\\infty, r})$ when $p=\\infty$. To the best of our knowledge, this is the first work showing that adversarial training provably yields robust classifiers in the presence of noise.}\n}", "pdf": "http://proceedings.mlr.press/v139/zou21a/zou21a.pdf", "supp": "", "pdf_size": 460824, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7945953018023246255&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, UCLA; Department of Statistics, UCLA; Department of Computer Science, UCLA", "aff_domain": "cs.ucla.edu; ;cs.ucla.edu", "email": "cs.ucla.edu; ;cs.ucla.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zou21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, Los Angeles", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ucla.edu", "aff_unique_abbr": "UCLA", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Los Angeles", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Provably Correct Optimization and Exploration with Non-linear Policies", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9303", "id": "9303", "proceeding": "http://proceedings.mlr.press/v139/feng21e.html", "slides": "", "author_site": "Fei Feng, Wotao Yin, Alekh Agarwal, Lin Yang", "author": "Fei Feng; Wotao Yin; Alekh Agarwal; Lin Yang", "abstract": "Policy optimization methods remain a powerful workhorse in empirical Reinforcement Learning (RL), with a focus on neural policies that can easily reason over complex and continuous state and/or action spaces. 
Theoretical understanding of strategic exploration in policy-based methods with non-linear function approximation, however, is largely missing. In this paper, we address this question by designing ENIAC, an actor-critic method that allows non-linear function approximation in the critic. We show that under certain assumptions, e.g., a bounded eluder dimension $d$ for the critic class, the learner finds a near-optimal policy in $\\widetilde{O}(\\mathrm{poly}(d))$ exploration rounds. The method is robust to model misspecification and strictly extends existing works on linear function approximation. We also develop some computational optimizations of our approach with slightly worse statistical guarantees, and an empirical adaptation building on existing deep RL tools. We empirically evaluate this adaptation, and show that it outperforms prior heuristics inspired by linear methods, establishing the value in correctly reasoning about the agent\u2019s uncertainty under non-linear function approximation.", "bibtex": "@InProceedings{pmlr-v139-feng21e,\n title = \t {Provably Correct Optimization and Exploration with Non-linear Policies},\n author = {Feng, Fei and Yin, Wotao and Agarwal, Alekh and Yang, Lin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3263--3273},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/feng21e/feng21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/feng21e.html},\n abstract = \t {Policy optimization methods remain a powerful workhorse in empirical Reinforcement Learning (RL), with a focus on neural policies that can easily reason over complex and continuous state and/or action spaces. Theoretical understanding of strategic exploration in policy-based methods with non-linear function approximation, however, is largely missing. In this paper, we address this question by designing ENIAC, an actor-critic method that allows non-linear function approximation in the critic. We show that under certain assumptions, e.g., a bounded eluder dimension $d$ for the critic class, the learner finds a near-optimal policy in $\\widetilde{O}(\\mathrm{poly}(d))$ exploration rounds. The method is robust to model misspecification and strictly extends existing works on linear function approximation. We also develop some computational optimizations of our approach with slightly worse statistical guarantees, and an empirical adaptation building on existing deep RL tools. 
We empirically evaluate this adaptation, and show that it outperforms prior heuristics inspired by linear methods, establishing the value in correctly reasoning about the agent\u2019s uncertainty under non-linear function approximation.}\n}", "pdf": "http://proceedings.mlr.press/v139/feng21e/feng21e.pdf", "supp": "", "pdf_size": 951346, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5246454033177283474&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/feng21e.html" }, { "title": "Provably Efficient Algorithms for Multi-Objective Competitive RL", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9339", "id": "9339", "proceeding": "http://proceedings.mlr.press/v139/yu21b.html", "slides": "", "author_site": "Tiancheng Yu, Yi Tian, Jingzhao Zhang, Suvrit Sra", "author": "Tiancheng Yu; Yi Tian; Jingzhao Zhang; Suvrit Sra", "abstract": "We study multi-objective reinforcement learning (RL) where an agent\u2019s reward is represented as a vector. In settings where an agent competes against opponents, its performance is measured by the distance of its average return vector to a target set. We develop statistically and computationally efficient algorithms to approach the associated target set. Our results extend Blackwell\u2019s approachability theorem\u00a0\\citep{blackwell1956analog} to tabular RL, where strategic exploration becomes essential. The algorithms presented are adaptive; their guarantees hold even without Blackwell\u2019s approachability condition. If the opponents use fixed policies, we give an improved rate of approaching the target set while also tackling the more ambitious goal of simultaneously minimizing a scalar cost function. We discuss our analysis for this special case by relating our results to previous works on constrained RL. To our knowledge, this work provides the first provably efficient algorithms for vector-valued Markov games and our theoretical guarantees are near-optimal.", "bibtex": "@InProceedings{pmlr-v139-yu21b,\n title = \t {Provably Efficient Algorithms for Multi-Objective Competitive RL},\n author = {Yu, Tiancheng and Tian, Yi and Zhang, Jingzhao and Sra, Suvrit},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12167--12176},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yu21b/yu21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/yu21b.html},\n abstract = \t {We study multi-objective reinforcement learning (RL) where an agent\u2019s reward is represented as a vector. In settings where an agent competes against opponents, its performance is measured by the distance of its average return vector to a target set. We develop statistically and computationally efficient algorithms to approach the associated target set. Our results extend Blackwell\u2019s approachability theorem\u00a0\\citep{blackwell1956analog} to tabular RL, where strategic exploration becomes essential. The algorithms presented are adaptive; their guarantees hold even without Blackwell\u2019s approachability condition. 
If the opponents use fixed policies, we give an improved rate of approaching the target set while also tackling the more ambitious goal of simultaneously minimizing a scalar cost function. We discuss our analysis for this special case by relating our results to previous works on constrained RL. To our knowledge, this work provides the first provably efficient algorithms for vector-valued Markov games and our theoretical guarantees are near-optimal.}\n}", "pdf": "http://proceedings.mlr.press/v139/yu21b/yu21b.pdf", "supp": "", "pdf_size": 462838, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12741728398655062294&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of EECS, MIT, Cambridge, USA; Department of EECS, MIT, Cambridge, USA; Department of EECS, MIT, Cambridge, USA; Department of EECS, MIT, Cambridge, USA", "aff_domain": "mit.edu; ; ; ", "email": "mit.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/yu21b.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Department of Electrical Engineering and Computer Science", "aff_unique_url": "https://web.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Provably Efficient Fictitious Play Policy Optimization for Zero-Sum Markov Games with Structured Transitions", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8773", "id": "8773", "proceeding": "http://proceedings.mlr.press/v139/qiu21b.html", "slides": "", "author_site": "Shuang Qiu, Xiaohan Wei, Jieping Ye, Zhaoran Wang, Zhuoran Yang", "author": "Shuang Qiu; Xiaohan Wei; Jieping Ye; Zhaoran Wang; Zhuoran Yang", "abstract": "While single-agent policy optimization in a fixed environment has attracted a lot of research attention recently in the reinforcement learning community, much less is known theoretically when there are multiple agents playing in a potentially competitive environment. We take steps forward by proposing and analyzing new fictitious play policy optimization algorithms for two-player zero-sum Markov games with structured but unknown transitions. We consider two classes of transition structures: factored independent transition and single-controller transition. For both scenarios, we prove tight $\\widetilde{\\mathcal{O}}(\\sqrt{T})$ regret bounds after $T$ steps in a two-agent competitive game scenario. The regret of each player is measured against a potentially adversarial opponent who can choose a single best policy in hindsight after observing the full policy sequence. Our algorithms feature a combination of Upper Confidence Bound (UCB)-type optimism and fictitious play under the scope of simultaneous policy optimization in a non-stationary environment. 
When both players adopt the proposed algorithms, their overall optimality gap is $\\widetilde{\\mathcal{O}}(\\sqrt{T})$.", "bibtex": "@InProceedings{pmlr-v139-qiu21b,\n title = \t {Provably Efficient Fictitious Play Policy Optimization for Zero-Sum Markov Games with Structured Transitions},\n author = {Qiu, Shuang and Wei, Xiaohan and Ye, Jieping and Wang, Zhaoran and Yang, Zhuoran},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8715--8725},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/qiu21b/qiu21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/qiu21b.html},\n abstract = \t {While single-agent policy optimization in a fixed environment has attracted a lot of research attention recently in the reinforcement learning community, much less is known theoretically when there are multiple agents playing in a potentially competitive environment. We take steps forward by proposing and analyzing new fictitious play policy optimization algorithms for two-player zero-sum Markov games with structured but unknown transitions. We consider two classes of transition structures: factored independent transition and single-controller transition. For both scenarios, we prove tight $\\widetilde{\\mathcal{O}}(\\sqrt{T})$ regret bounds after $T$ steps in a two-agent competitive game scenario. The regret of each player is measured against a potentially adversarial opponent who can choose a single best policy in hindsight after observing the full policy sequence. Our algorithms feature a combination of Upper Confidence Bound (UCB)-type optimism and fictitious play under the scope of simultaneous policy optimization in a non-stationary environment. 
When both players adopt the proposed algorithms, their overall optimality gap is $\\widetilde{\\mathcal{O}}(\\sqrt{T})$.}\n}", "pdf": "http://proceedings.mlr.press/v139/qiu21b/qiu21b.pdf", "supp": "", "pdf_size": 329907, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8666907217599095646&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "University of Michigan; Facebook, Inc.; University of Michigan; Northwestern University; Princeton University", "aff_domain": "umich.edu;fb.com;umich.edu;gmail.com;princeton.edu", "email": "umich.edu;fb.com;umich.edu;gmail.com;princeton.edu", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/qiu21b.html", "aff_unique_index": "0;1;0;2;3", "aff_unique_norm": "University of Michigan;Meta;Northwestern University;Princeton University", "aff_unique_dep": ";Facebook;;", "aff_unique_url": "https://www.umich.edu;https://www.facebook.com;https://www.northwestern.edu;https://www.princeton.edu", "aff_unique_abbr": "UM;FB;NU;Princeton", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Provably Efficient Learning of Transferable Rewards", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10567", "id": "10567", "proceeding": "http://proceedings.mlr.press/v139/metelli21a.html", "slides": "/media/icml-2021/Slides/10567_IL7CFeK.pdf", "author_site": "Alberto Maria Metelli, Giorgia Ramponi, Alessandro Concetti, Marcello Restelli", "author": "Alberto Maria Metelli; Giorgia Ramponi; Alessandro Concetti; Marcello Restelli", "abstract": "The reward function is widely accepted as a succinct, robust, and transferable representation of a task. Typical approaches, at the basis of Inverse Reinforcement Learning (IRL), leverage on expert demonstrations to recover a reward function. In this paper, we study the theoretical properties of the class of reward functions that are compatible with the expert\u2019s behavior. We analyze how the limited knowledge of the expert\u2019s policy and of the environment affects the reward reconstruction phase. Then, we examine how the error propagates to the learned policy\u2019s performance when transferring the reward function to a different environment. We employ these findings to devise a provably efficient active sampling approach, aware of the need for transferring the reward function, that can be paired with a large variety of IRL algorithms. Finally, we provide numerical simulations on benchmark environments.", "bibtex": "@InProceedings{pmlr-v139-metelli21a,\n title = \t {Provably Efficient Learning of Transferable Rewards},\n author = {Metelli, Alberto Maria and Ramponi, Giorgia and Concetti, Alessandro and Restelli, Marcello},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7665--7676},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/metelli21a/metelli21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/metelli21a.html},\n abstract = \t {The reward function is widely accepted as a succinct, robust, and transferable representation of a task. Typical approaches, at the basis of Inverse Reinforcement Learning (IRL), leverage on expert demonstrations to recover a reward function. 
In this paper, we study the theoretical properties of the class of reward functions that are compatible with the expert\u2019s behavior. We analyze how the limited knowledge of the expert\u2019s policy and of the environment affects the reward reconstruction phase. Then, we examine how the error propagates to the learned policy\u2019s performance when transferring the reward function to a different environment. We employ these findings to devise a provably efficient active sampling approach, aware of the need for transferring the reward function, that can be paired with a large variety of IRL algorithms. Finally, we provide numerical simulations on benchmark environments.}\n}", "pdf": "http://proceedings.mlr.press/v139/metelli21a/metelli21a.pdf", "supp": "", "pdf_size": 574480, "gs_citation": 41, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3134164627990276005&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Dipartimento di Elettronica, Informazione e Bioingegneria, Politecnico di Milano, Milan, Italy; Dipartimento di Elettronica, Informazione e Bioingegneria, Politecnico di Milano, Milan, Italy; Dipartimento di Elettronica, Informazione e Bioingegneria, Politecnico di Milano, Milan, Italy; Dipartimento di Elettronica, Informazione e Bioingegneria, Politecnico di Milano, Milan, Italy", "aff_domain": "polimi.it; ; ; ", "email": "polimi.it; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/metelli21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Politecnico di Milano", "aff_unique_dep": "Dipartimento di Elettronica, Informazione e Bioingegneria", "aff_unique_url": "https://www.polimi.it", "aff_unique_abbr": "Politecnico di Milano", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Milan", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Italy" }, { "title": "Provably Efficient Reinforcement Learning for Discounted MDPs with Feature Mapping", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9889", "id": "9889", "proceeding": "http://proceedings.mlr.press/v139/zhou21a.html", "slides": "", "author_site": "Dongruo Zhou, Jiafan He, Quanquan Gu", "author": "Dongruo Zhou; Jiafan He; Quanquan Gu", "abstract": "Modern tasks in reinforcement learning have large state and action spaces. To deal with them efficiently, one often uses predefined feature mapping to represent states and actions in a low dimensional space. In this paper, we study reinforcement learning for discounted Markov Decision Processes (MDPs), where the transition kernel can be parameterized as a linear function of certain feature mapping. We propose a novel algorithm which makes use of the feature mapping and obtains a $\\tilde O(d\\sqrt{T}/(1-\\gamma)^2)$ regret, where $d$ is the dimension of the feature space, $T$ is the time horizon and $\\gamma$ is the discount factor of the MDP. To the best of our knowledge, this is the first polynomial regret bound without accessing a generative model or making strong assumptions such as ergodicity of the MDP. By constructing a special class of MDPs, we also show that for any algorithms, the regret is lower bounded by $\\Omega(d\\sqrt{T}/(1-\\gamma)^{1.5})$. 
Our upper and lower bound results together suggest that the proposed reinforcement learning algorithm is near-optimal up to a $(1-\\gamma)^{-0.5}$ factor.", "bibtex": "@InProceedings{pmlr-v139-zhou21a,\n title = \t {Provably Efficient Reinforcement Learning for Discounted MDPs with Feature Mapping},\n author = {Zhou, Dongruo and He, Jiafan and Gu, Quanquan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12793--12802},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhou21a/zhou21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhou21a.html},\n abstract = \t {Modern tasks in reinforcement learning have large state and action spaces. To deal with them efficiently, one often uses predefined feature mapping to represent states and actions in a low dimensional space. In this paper, we study reinforcement learning for discounted Markov Decision Processes (MDPs), where the transition kernel can be parameterized as a linear function of certain feature mapping. We propose a novel algorithm which makes use of the feature mapping and obtains a $\\tilde O(d\\sqrt{T}/(1-\\gamma)^2)$ regret, where $d$ is the dimension of the feature space, $T$ is the time horizon and $\\gamma$ is the discount factor of the MDP. To the best of our knowledge, this is the first polynomial regret bound without accessing a generative model or making strong assumptions such as ergodicity of the MDP. By constructing a special class of MDPs, we also show that for any algorithms, the regret is lower bounded by $\\Omega(d\\sqrt{T}/(1-\\gamma)^{1.5})$. 
Our upper and lower bound results together suggest that the proposed reinforcement learning algorithm is near-optimal up to a $(1-\\gamma)^{-0.5}$ factor.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhou21a/zhou21a.pdf", "supp": "", "pdf_size": 370646, "gs_citation": 154, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2456357552991059105&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, University of California, Los Angeles, CA 90095, USA; Department of Computer Science, University of California, Los Angeles, CA 90095, USA; Department of Computer Science, University of California, Los Angeles, CA 90095, USA", "aff_domain": "cs.ucla.edu;cs.ucla.edu;cs.ucla.edu", "email": "cs.ucla.edu;cs.ucla.edu;cs.ucla.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zhou21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, Los Angeles", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ucla.edu", "aff_unique_abbr": "UCLA", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Los Angeles", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Provably End-to-end Label-noise Learning without Anchor Points", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10491", "id": "10491", "proceeding": "http://proceedings.mlr.press/v139/li21l.html", "slides": "", "author_site": "Xuefeng Li, Tongliang Liu, Bo Han, Gang Niu, Masashi Sugiyama", "author": "Xuefeng Li; Tongliang Liu; Bo Han; Gang Niu; Masashi Sugiyama", "abstract": "In label-noise learning, the transition matrix plays a key role in building statistically consistent classifiers. Existing consistent estimators for the transition matrix have been developed by exploiting anchor points. However, the anchor-point assumption is not always satisfied in real scenarios. In this paper, we propose an end-to-end framework for solving label-noise learning without anchor points, in which we simultaneously optimize two objectives: the cross entropy loss between the noisy label and the predicted probability by the neural network, and the volume of the simplex formed by the columns of the transition matrix. Our proposed framework can identify the transition matrix if the clean class-posterior probabilities are sufficiently scattered. This is by far the mildest assumption under which the transition matrix is provably identifiable and the learned classifier is statistically consistent. Experimental results on benchmark datasets demonstrate the effectiveness and robustness of the proposed method.", "bibtex": "@InProceedings{pmlr-v139-li21l,\n title = \t {Provably End-to-end Label-noise Learning without Anchor Points},\n author = {Li, Xuefeng and Liu, Tongliang and Han, Bo and Niu, Gang and Sugiyama, Masashi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6403--6413},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21l/li21l.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21l.html},\n abstract = \t {In label-noise learning, the transition matrix plays a key role in building statistically consistent classifiers. 
Existing consistent estimators for the transition matrix have been developed by exploiting anchor points. However, the anchor-point assumption is not always satisfied in real scenarios. In this paper, we propose an end-to-end framework for solving label-noise learning without anchor points, in which we simultaneously optimize two objectives: the cross entropy loss between the noisy label and the predicted probability by the neural network, and the volume of the simplex formed by the columns of the transition matrix. Our proposed framework can identify the transition matrix if the clean class-posterior probabilities are sufficiently scattered. This is by far the mildest assumption under which the transition matrix is provably identifiable and the learned classifier is statistically consistent. Experimental results on benchmark datasets demonstrate the effectiveness and robustness of the proposed method.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21l/li21l.pdf", "supp": "", "pdf_size": 967477, "gs_citation": 165, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9258083582460233447&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "University of New South Wales; Trustworthy Machine Learning Lab, University of Sydney; Hong Kong Baptist University; RIKEN AIP; University of Tokyo", "aff_domain": "unsw.edu.au;sydney.edu.au;hkbu.edu.hk;aist.go.jp;k.u-tokyo.ac.jp", "email": "unsw.edu.au;sydney.edu.au;hkbu.edu.hk;aist.go.jp;k.u-tokyo.ac.jp", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/li21l.html", "aff_unique_index": "0;1;2;3;4", "aff_unique_norm": "University of New South Wales;University of Sydney;Hong Kong Baptist University;RIKEN;University of Tokyo", "aff_unique_dep": ";Trustworthy Machine Learning Lab;;Advanced Institute for Computational Science;", "aff_unique_url": "https://www.unsw.edu.au;https://www.sydney.edu.au;https://www.hkbu.edu.hk;https://www.aip.riken.jp;https://www.u-tokyo.ac.jp", "aff_unique_abbr": "UNSW;USYD;HKBU;RIKEN AIP;UTokyo", "aff_campus_unique_index": "1", "aff_campus_unique": ";Hong Kong SAR", "aff_country_unique_index": "0;0;1;2;2", "aff_country_unique": "Australia;China;Japan" }, { "title": "Provably Strict Generalisation Benefit for Equivariant Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10385", "id": "10385", "proceeding": "http://proceedings.mlr.press/v139/elesedy21a.html", "slides": "/media/icml-2021/Slides/10385.pdf", "author_site": "Bryn Elesedy, Sheheryar Zaidi", "author": "Bryn Elesedy; Sheheryar Zaidi", "abstract": "It is widely believed that engineering a model to be invariant/equivariant improves generalisation. Despite the growing popularity of this approach, a precise characterisation of the generalisation benefit is lacking. By considering the simplest case of linear models, this paper provides the first provably non-zero improvement in generalisation for invariant/equivariant models when the target distribution is invariant/equivariant with respect to a compact group. Moreover, our work reveals an interesting relationship between generalisation, the number of training examples and properties of the group action. 
Our results rest on an observation of the structure of function spaces under averaging operators which, along with its consequences for feature averaging, may be of independent interest.", "bibtex": "@InProceedings{pmlr-v139-elesedy21a,\n title = \t {Provably Strict Generalisation Benefit for Equivariant Models},\n author = {Elesedy, Bryn and Zaidi, Sheheryar},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2959--2969},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/elesedy21a/elesedy21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/elesedy21a.html},\n abstract = \t {It is widely believed that engineering a model to be invariant/equivariant improves generalisation. Despite the growing popularity of this approach, a precise characterisation of the generalisation benefit is lacking. By considering the simplest case of linear models, this paper provides the first provably non-zero improvement in generalisation for invariant/equivariant models when the target distribution is invariant/equivariant with respect to a compact group. Moreover, our work reveals an interesting relationship between generalisation, the number of training examples and properties of the group action. Our results rest on an observation of the structure of function spaces under averaging operators which, along with its consequences for feature averaging, may be of independent interest.}\n}", "pdf": "http://proceedings.mlr.press/v139/elesedy21a/elesedy21a.pdf", "supp": "", "pdf_size": 2090960, "gs_citation": 103, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8839218471360235487&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, University of Oxford, Oxford, United Kingdom; Department of Statistics, University of Oxford, Oxford, United Kingdom", "aff_domain": "robots.ox.ac.uk; ", "email": "robots.ox.ac.uk; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/elesedy21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Oxford", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ox.ac.uk", "aff_unique_abbr": "Oxford", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Oxford", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "title": "Proximal Causal Learning with Kernels: Two-Stage Estimation and Moment Restriction", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9925", "id": "9925", "proceeding": "http://proceedings.mlr.press/v139/mastouri21a.html", "slides": "", "author_site": "Afsaneh Mastouri, Yuchen Zhu, Limor Gultchin, Anna Korba, Ricardo Silva, Matt J. Kusner, Arthur Gretton, Krikamol Muandet", "author": "Afsaneh Mastouri; Yuchen Zhu; Limor Gultchin; Anna Korba; Ricardo Silva; Matt Kusner; Arthur Gretton; Krikamol Muandet", "abstract": "We address the problem of causal effect estimation in the presence of unobserved confounding, but where proxies for the latent confounder(s) are observed. We propose two kernel-based methods for nonlinear causal effect estimation in this setting: (a) a two-stage regression approach, and (b) a maximum moment restriction approach. 
We focus on the proximal causal learning setting, but our methods can be used to solve a wider class of inverse problems characterised by a Fredholm integral equation. In particular, we provide a unifying view of two-stage and moment restriction approaches for solving this problem in a nonlinear setting. We provide consistency guarantees for each algorithm, and demonstrate that these approaches achieve competitive results on synthetic data and data simulating a real-world task. In particular, our approach outperforms earlier methods that are not suited to leveraging proxy variables.", "bibtex": "@InProceedings{pmlr-v139-mastouri21a,\n title = \t {Proximal Causal Learning with Kernels: Two-Stage Estimation and Moment Restriction},\n author = {Mastouri, Afsaneh and Zhu, Yuchen and Gultchin, Limor and Korba, Anna and Silva, Ricardo and Kusner, Matt and Gretton, Arthur and Muandet, Krikamol},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7512--7523},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/mastouri21a/mastouri21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/mastouri21a.html},\n abstract = \t {We address the problem of causal effect estimation in the presence of unobserved confounding, but where proxies for the latent confounder(s) are observed. We propose two kernel-based methods for nonlinear causal effect estimation in this setting: (a) a two-stage regression approach, and (b) a maximum moment restriction approach. We focus on the proximal causal learning setting, but our methods can be used to solve a wider class of inverse problems characterised by a Fredholm integral equation. In particular, we provide a unifying view of two-stage and moment restriction approaches for solving this problem in a nonlinear setting. We provide consistency guarantees for each algorithm, and demonstrate that these approaches achieve competitive results on synthetic data and data simulating a real-world task. 
In particular, our approach outperforms earlier methods that are not suited to leveraging proxy variables.}\n}", "pdf": "http://proceedings.mlr.press/v139/mastouri21a/mastouri21a.pdf", "supp": "", "pdf_size": 1218292, "gs_citation": 78, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12361479537948670702&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "University College London, London, United Kingdom; University College London, London, United Kingdom; University of Oxford, Oxford, United Kingdom; ENSAE/CREST, Paris, France; University College London, London, United Kingdom; University College London, London, United Kingdom; University College London, London, United Kingdom; Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany", "aff_domain": "gmail.com;ucl.ac.uk; ; ;ucl.ac.uk; ;gmail.com;tuebingen.mpg.de", "email": "gmail.com;ucl.ac.uk; ; ;ucl.ac.uk; ;gmail.com;tuebingen.mpg.de", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/mastouri21a.html", "aff_unique_index": "0;0;1;2;0;0;0;3", "aff_unique_norm": "University College London;University of Oxford;ENSAE;Max Planck Institute for Intelligent Systems", "aff_unique_dep": ";;CREST;", "aff_unique_url": "https://www.ucl.ac.uk;https://www.ox.ac.uk;https://www.ensae.fr;https://www.mpi-is.mpg.de", "aff_unique_abbr": "UCL;Oxford;ENSAE;MPI-IS", "aff_campus_unique_index": "0;0;1;2;0;0;0;3", "aff_campus_unique": "London;Oxford;Paris;T\u00fcbingen", "aff_country_unique_index": "0;0;0;1;0;0;0;2", "aff_country_unique": "United Kingdom;France;Germany" }, { "title": "PsiPhi-Learning: Reinforcement Learning with Demonstrations using Successor Features and Inverse Temporal Difference Learning", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10119", "id": "10119", "proceeding": "http://proceedings.mlr.press/v139/filos21a.html", "slides": "/media/icml-2021/Slides/10119.pdf", "author_site": "Angelos Filos, Clare Lyle, Yarin Gal, Sergey Levine, Natasha Jaques, Gregory Farquhar", "author": "Angelos Filos; Clare Lyle; Yarin Gal; Sergey Levine; Natasha Jaques; Gregory Farquhar", "abstract": "We study reinforcement learning (RL) with no-reward demonstrations, a setting in which an RL agent has access to additional data from the interaction of other agents with the same environment. However, it has no access to the rewards or goals of these agents, and their objectives and levels of expertise may vary widely. These assumptions are common in multi-agent settings, such as autonomous driving. To effectively use this data, we turn to the framework of successor features. This allows us to disentangle shared features and dynamics of the environment from agent-specific rewards and policies. We propose a multi-task inverse reinforcement learning (IRL) algorithm, called \\emph{inverse temporal difference learning} (ITD), that learns shared state features, alongside per-agent successor features and preference vectors, purely from demonstrations without reward labels. We further show how to seamlessly integrate ITD with learning from online environment interactions, arriving at a novel algorithm for reinforcement learning with demonstrations, called $\\Psi \\Phi$-learning (pronounced \u2018Sci-Fi\u2019). 
We provide empirical evidence for the effectiveness of $\\Psi \\Phi$-learning as a method for improving RL, IRL, imitation, and few-shot transfer, and derive worst-case bounds for its performance in zero-shot transfer to new tasks.", "bibtex": "@InProceedings{pmlr-v139-filos21a,\n title = \t {PsiPhi-Learning: Reinforcement Learning with Demonstrations using Successor Features and Inverse Temporal Difference Learning},\n author = {Filos, Angelos and Lyle, Clare and Gal, Yarin and Levine, Sergey and Jaques, Natasha and Farquhar, Gregory},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3305--3317},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/filos21a/filos21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/filos21a.html},\n abstract = \t {We study reinforcement learning (RL) with no-reward demonstrations, a setting in which an RL agent has access to additional data from the interaction of other agents with the same environment. However, it has no access to the rewards or goals of these agents, and their objectives and levels of expertise may vary widely. These assumptions are common in multi-agent settings, such as autonomous driving. To effectively use this data, we turn to the framework of successor features. This allows us to disentangle shared features and dynamics of the environment from agent-specific rewards and policies. We propose a multi-task inverse reinforcement learning (IRL) algorithm, called \\emph{inverse temporal difference learning} (ITD), that learns shared state features, alongside per-agent successor features and preference vectors, purely from demonstrations without reward labels. We further show how to seamlessly integrate ITD with learning from online environment interactions, arriving at a novel algorithm for reinforcement learning with demonstrations, called $\\Psi \\Phi$-learning (pronounced \u2018Sci-Fi\u2019). 
We provide empirical evidence for the effectiveness of $\\Psi \\Phi$-learning as a method for improving RL, IRL, imitation, and few-shot transfer, and derive worst-case bounds for its performance in zero-shot transfer to new tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/filos21a/filos21a.pdf", "supp": "", "pdf_size": 4430881, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=673567895573287554&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "University of Oxford; University of Oxford; University of Oxford; University of California, Berkeley; University of California, Berkeley + Google Research, Brain team; DeepMind", "aff_domain": "cs.ox.ac.uk; ; ; ; ; ", "email": "cs.ox.ac.uk; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/filos21a.html", "aff_unique_index": "0;0;0;1;1+2;3", "aff_unique_norm": "University of Oxford;University of California, Berkeley;Google;DeepMind", "aff_unique_dep": ";;Google Research;", "aff_unique_url": "https://www.ox.ac.uk;https://www.berkeley.edu;https://research.google;https://deepmind.com", "aff_unique_abbr": "Oxford;UC Berkeley;Google;DeepMind", "aff_campus_unique_index": "1;1+2", "aff_campus_unique": ";Berkeley;Mountain View", "aff_country_unique_index": "0;0;0;1;1+1;0", "aff_country_unique": "United Kingdom;United States" }, { "title": "Pure Exploration and Regret Minimization in Matching Bandits", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10347", "id": "10347", "proceeding": "http://proceedings.mlr.press/v139/sentenac21a.html", "slides": "", "author_site": "Flore Sentenac, Jialin Yi, Cl\u00e9ment Calauz\u00e8nes, Vianney Perchet, Milan Vojnovic", "author": "Flore Sentenac; Jialin Yi; Clement Calauzenes; Vianney Perchet; Milan Vojnovic", "abstract": "Finding an optimal matching in a weighted graph is a standard combinatorial problem. We consider its semi-bandit version where either a pair or a full matching is sampled sequentially. We prove that it is possible to leverage a rank-1 assumption on the adjacency matrix to reduce the sample complexity and the regret of off-the-shelf algorithms up to reaching a linear dependency in the number of vertices (up to poly-log terms).", "bibtex": "@InProceedings{pmlr-v139-sentenac21a,\n title = \t {Pure Exploration and Regret Minimization in Matching Bandits},\n author = {Sentenac, Flore and Yi, Jialin and Calauzenes, Clement and Perchet, Vianney and Vojnovic, Milan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9434--9442},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sentenac21a/sentenac21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/sentenac21a.html},\n abstract = \t {Finding an optimal matching in a weighted graph is a standard combinatorial problem. We consider its semi-bandit version where either a pair or a full matching is sampled sequentially. 
We prove that it is possible to leverage a rank-1 assumption on the adjacency matrix to reduce the sample complexity and the regret of off-the-shelf algorithms up to reaching a linear dependency in the number of vertices (up to poly-log terms).}\n}", "pdf": "http://proceedings.mlr.press/v139/sentenac21a/sentenac21a.pdf", "supp": "", "pdf_size": 400195, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=193189007990239138&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "CREST, ENSAE Paris, Palaiseau, France; London School of Economics, London, UK; Criteo AI Lab, Paris, France; CREST, ENSAE Paris, Palaiseau, France + Criteo AI Lab, Paris, France; London School of Economics, London, UK", "aff_domain": "gmail.com;lse.ac.uk; ; ; ", "email": "gmail.com;lse.ac.uk; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/sentenac21a.html", "aff_unique_index": "0;1;2;0+2;1", "aff_unique_norm": "CREST;London School of Economics;Criteo", "aff_unique_dep": ";;Criteo AI Lab", "aff_unique_url": ";https://www.lse.ac.uk;https://www.criteo.com", "aff_unique_abbr": ";LSE;Criteo", "aff_campus_unique_index": "0;1;2;0+2;1", "aff_campus_unique": "Palaiseau;London;Paris", "aff_country_unique_index": "0;1;0;0+0;1", "aff_country_unique": "France;United Kingdom" }, { "title": "Putting the \u201cLearning\" into Learning-Augmented Algorithms for Frequency Estimation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10501", "id": "10501", "proceeding": "http://proceedings.mlr.press/v139/du21d.html", "slides": "/media/icml-2021/Slides/10501.pdf", "author_site": "Elbert Du, Franklyn Wang, Michael Mitzenmacher", "author": "Elbert Du; Franklyn Wang; Michael Mitzenmacher", "abstract": "In learning-augmented algorithms, algorithms are enhanced using information from a machine learning algorithm. 
In turn, this suggests that we should tailor our machine-learning approach for the target algorithm. We here consider this synergy in the context of the learned count-min sketch from (Hsu et al., 2019). Learning here is used to predict heavy hitters from a data stream, which are counted explicitly outside the sketch. We show that an approximately sufficient statistic for the performance of the underlying count-min sketch is given by the coverage of the predictor, or the normalized $L^1$ norm of keys that are filtered by the predictor to be explicitly counted. We show that machine learning models which are trained to optimize for coverage lead to large improvements in performance over prior approaches according to the average absolute frequency error. Our source code can be found at https://github.com/franklynwang/putting-the-learning-in-LAA.}\n}", "pdf": "http://proceedings.mlr.press/v139/du21d/du21d.pdf", "supp": "", "pdf_size": 761993, "gs_citation": -1, "gs_cited_by_link": "", "gs_version_total": -1, "aff": "Department of Mathematics, Harvard University + Department of Computer Science, Harvard University; Department of Mathematics, Harvard University + Department of Computer Science, Harvard University; Department of Computer Science, Harvard University", "aff_domain": "college.havard.edu;college.havard.edu; ", "email": "college.havard.edu;college.havard.edu; ", "github": "https://github.com/franklynwang/putting-the-learning-in-LAA", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/du21d.html", "aff_unique_index": "0+0;0+0;0", "aff_unique_norm": "Harvard University", "aff_unique_dep": "Department of Mathematics", "aff_unique_url": "https://www.harvard.edu", "aff_unique_abbr": "Harvard", "aff_campus_unique_index": "0+0;0+0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0+0;0+0;0", "aff_country_unique": "United States" }, { "title": "Quantifying Availability and Discovery in Recommender Systems via Stochastic Reachability", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8687", "id": "8687", "proceeding": "http://proceedings.mlr.press/v139/curmei21a.html", "slides": "", "author_site": "Mihaela Curmei, Sarah Dean, Benjamin Recht", "author": "Mihaela Curmei; Sarah Dean; Benjamin Recht", "abstract": "In this work, we consider how preference models in interactive recommendation systems determine the availability of content and users\u2019 opportunities for discovery. We propose an evaluation procedure based on stochastic reachability to quantify the maximum probability of recommending a target piece of content to an user for a set of allowable strategic modifications. This framework allows us to compute an upper bound on the likelihood of recommendation with minimal assumptions about user behavior. Stochastic reachability can be used to detect biases in the availability of content and diagnose limitations in the opportunities for discovery granted to users. We show that this metric can be computed efficiently as a convex program for a variety of practical settings, and further argue that reachability is not inherently at odds with accuracy. We demonstrate evaluations of recommendation algorithms trained on large datasets of explicit and implicit ratings. 
Our results illustrate how preference models, selection rules, and user interventions impact reachability and how these effects can be distributed unevenly.", "bibtex": "@InProceedings{pmlr-v139-curmei21a,\n title = \t {Quantifying Availability and Discovery in Recommender Systems via Stochastic Reachability},\n author = {Curmei, Mihaela and Dean, Sarah and Recht, Benjamin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2265--2275},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/curmei21a/curmei21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/curmei21a.html},\n abstract = \t {In this work, we consider how preference models in interactive recommendation systems determine the availability of content and users\u2019 opportunities for discovery. We propose an evaluation procedure based on stochastic reachability to quantify the maximum probability of recommending a target piece of content to an user for a set of allowable strategic modifications. This framework allows us to compute an upper bound on the likelihood of recommendation with minimal assumptions about user behavior. Stochastic reachability can be used to detect biases in the availability of content and diagnose limitations in the opportunities for discovery granted to users. We show that this metric can be computed efficiently as a convex program for a variety of practical settings, and further argue that reachability is not inherently at odds with accuracy. We demonstrate evaluations of recommendation algorithms trained on large datasets of explicit and implicit ratings. 
Our results illustrate how preference models, selection rules, and user interventions impact reachability and how these effects can be distributed unevenly.}\n}", "pdf": "http://proceedings.mlr.press/v139/curmei21a/curmei21a.pdf", "supp": "", "pdf_size": 1012690, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6680880425324910585&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Electrical Engineering and Computer Sciences, University of California, Berkeley, USA; Department of Electrical Engineering and Computer Sciences, University of California, Berkeley, USA; Department of Electrical Engineering and Computer Sciences, University of California, Berkeley, USA", "aff_domain": "berkeley.edu;eecs.berkeley.edu; ", "email": "berkeley.edu;eecs.berkeley.edu; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/curmei21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Department of Electrical Engineering and Computer Sciences", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Quantifying Ignorance in Individual-Level Causal-Effect Estimates under Hidden Confounding", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10143", "id": "10143", "proceeding": "http://proceedings.mlr.press/v139/jesson21a.html", "slides": "", "author_site": "Andrew Jesson, S\u00f6ren Mindermann, Yarin Gal, Uri Shalit", "author": "Andrew Jesson; S\u00f6ren Mindermann; Yarin Gal; Uri Shalit", "abstract": "We study the problem of learning conditional average treatment effects (CATE) from high-dimensional, observational data with unobserved confounders. Unobserved confounders introduce ignorance\u2014a level of unidentifiability\u2014about an individual\u2019s response to treatment by inducing bias in CATE estimates. We present a new parametric interval estimator suited for high-dimensional data, that estimates a range of possible CATE values when given a predefined bound on the level of hidden confounding. Further, previous interval estimators do not account for ignorance about the CATE associated with samples that may be underrepresented in the original study, or samples that violate the overlap assumption. Our interval estimator also incorporates model uncertainty so that practitioners can be made aware of such out-of-distribution data. 
We prove that our estimator converges to tight bounds on CATE when there may be unobserved confounding and assess it using semi-synthetic, high-dimensional datasets.", "bibtex": "@InProceedings{pmlr-v139-jesson21a,\n title = \t {Quantifying Ignorance in Individual-Level Causal-Effect Estimates under Hidden Confounding},\n author = {Jesson, Andrew and Mindermann, S{\\\"o}ren and Gal, Yarin and Shalit, Uri},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4829--4838},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jesson21a/jesson21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jesson21a.html},\n abstract = \t {We study the problem of learning conditional average treatment effects (CATE) from high-dimensional, observational data with unobserved confounders. Unobserved confounders introduce ignorance\u2014a level of unidentifiability\u2014about an individual\u2019s response to treatment by inducing bias in CATE estimates. We present a new parametric interval estimator suited for high-dimensional data, that estimates a range of possible CATE values when given a predefined bound on the level of hidden confounding. Further, previous interval estimators do not account for ignorance about the CATE associated with samples that may be underrepresented in the original study, or samples that violate the overlap assumption. Our interval estimator also incorporates model uncertainty so that practitioners can be made aware of such out-of-distribution data. We prove that our estimator converges to tight bounds on CATE when there may be unobserved confounding and assess it using semi-synthetic, high-dimensional datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/jesson21a/jesson21a.pdf", "supp": "", "pdf_size": 2496288, "gs_citation": 71, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4021084687511550592&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "OAMTL, University of Oxford; OAMTL, University of Oxford; OAMTL, University of Oxford; Machine Learning and Causal Inference in Healthcare Lab, Technion \u2013 Israel Institute of Technology", "aff_domain": "cs.ox.ac.uk; ; ; ", "email": "cs.ox.ac.uk; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/jesson21a.html", "aff_unique_index": "0;0;0;1", "aff_unique_norm": "University of Oxford;Technion \u2013 Israel Institute of Technology", "aff_unique_dep": "OAMTL;Machine Learning and Causal Inference in Healthcare Lab", "aff_unique_url": "https://www.ox.ac.uk;https://www.technion.ac.il", "aff_unique_abbr": "Oxford;Technion", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Oxford;", "aff_country_unique_index": "0;0;0;1", "aff_country_unique": "United Kingdom;Israel" }, { "title": "Quantifying and Reducing Bias in Maximum Likelihood Estimation of Structured Anomalies", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8655", "id": "8655", "proceeding": "http://proceedings.mlr.press/v139/chitra21a.html", "slides": "/media/icml-2021/Slides/8655.pdf", "author_site": "Uthsav Chitra, Kimberly Ding, Jasper C.H. Lee, Benjamin Raphael", "author": "Uthsav Chitra; Kimberly Ding; Jasper C.H. 
Lee; Benjamin J Raphael", "abstract": "Anomaly estimation, or the problem of finding a subset of a dataset that differs from the rest of the dataset, is a classic problem in machine learning and data mining. In both theoretical work and in applications, the anomaly is assumed to have a specific structure defined by membership in an anomaly family. For example, in temporal data the anomaly family may be time intervals, while in network data the anomaly family may be connected subgraphs. The most prominent approach for anomaly estimation is to compute the Maximum Likelihood Estimator (MLE) of the anomaly; however, it was recently observed that for normally distributed data, the MLE is a biased estimator for some anomaly families. In this work, we demonstrate that in the normal means setting, the bias of the MLE depends on the size of the anomaly family. We prove that if the number of sets in the anomaly family that contain the anomaly is sub-exponential, then the MLE is asymptotically unbiased. We also provide empirical evidence that the converse is true: if the number of such sets is exponential, then the MLE is asymptotically biased. Our analysis unifies a number of earlier results on the bias of the MLE for specific anomaly families. Next, we derive a new anomaly estimator using a mixture model, and we prove that our anomaly estimator is asymptotically unbiased regardless of the size of the anomaly family. We illustrate the advantages of our estimator versus the MLE on disease outbreak data and highway traffic data.", "bibtex": "@InProceedings{pmlr-v139-chitra21a,\n title = \t {Quantifying and Reducing Bias in Maximum Likelihood Estimation of Structured Anomalies},\n author = {Chitra, Uthsav and Ding, Kimberly and Lee, Jasper C.H. and Raphael, Benjamin J},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1908--1919},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chitra21a/chitra21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/chitra21a.html},\n abstract = \t {Anomaly estimation, or the problem of finding a subset of a dataset that differs from the rest of the dataset, is a classic problem in machine learning and data mining. In both theoretical work and in applications, the anomaly is assumed to have a specific structure defined by membership in an anomaly family. For example, in temporal data the anomaly family may be time intervals, while in network data the anomaly family may be connected subgraphs. The most prominent approach for anomaly estimation is to compute the Maximum Likelihood Estimator (MLE) of the anomaly; however, it was recently observed that for normally distributed data, the MLE is a biased estimator for some anomaly families. In this work, we demonstrate that in the normal means setting, the bias of the MLE depends on the size of the anomaly family. We prove that if the number of sets in the anomaly family that contain the anomaly is sub-exponential, then the MLE is asymptotically unbiased. We also provide empirical evidence that the converse is true: if the number of such sets is exponential, then the MLE is asymptotically biased. Our analysis unifies a number of earlier results on the bias of the MLE for specific anomaly families. 
Next, we derive a new anomaly estimator using a mixture model, and we prove that our anomaly estimator is asymptotically unbiased regardless of the size of the anomaly family. We illustrate the advantages of our estimator versus the MLE on disease outbreak data and highway traffic data.}\n}", "pdf": "http://proceedings.mlr.press/v139/chitra21a/chitra21a.pdf", "supp": "", "pdf_size": 1948927, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11992163901963267172&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science, Princeton University; Department of Computer Science, Princeton University; Department of Computer Science, Brown University; Department of Computer Science, Princeton University", "aff_domain": "princeton.edu; ; ;princeton.edu", "email": "princeton.edu; ; ;princeton.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/chitra21a.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "Princeton University;Brown University", "aff_unique_dep": "Department of Computer Science;Department of Computer Science", "aff_unique_url": "https://www.princeton.edu;https://www.brown.edu", "aff_unique_abbr": "Princeton;Brown", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Quantifying the Benefit of Using Differentiable Learning over Tangent Kernels", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8683", "id": "8683", "proceeding": "http://proceedings.mlr.press/v139/malach21a.html", "slides": "", "author_site": "Eran Malach, Pritish Kamath, Emmanuel Abbe, Nati Srebro", "author": "Eran Malach; Pritish Kamath; Emmanuel Abbe; Nathan Srebro", "abstract": "We study the relative power of learning with gradient descent on differentiable models, such as neural networks, versus using the corresponding tangent kernels. We show that under certain conditions, gradient descent achieves small error only if a related tangent kernel method achieves a non-trivial advantage over random guessing (a.k.a. weak learning), though this advantage might be very small even when gradient descent can achieve arbitrarily high accuracy. Complementing this, we show that without these conditions, gradient descent can in fact learn with small error even when no kernel method, in particular using the tangent kernel, can achieve a non-trivial advantage over random guessing.", "bibtex": "@InProceedings{pmlr-v139-malach21a,\n title = \t {Quantifying the Benefit of Using Differentiable Learning over Tangent Kernels},\n author = {Malach, Eran and Kamath, Pritish and Abbe, Emmanuel and Srebro, Nathan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7379--7389},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/malach21a/malach21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/malach21a.html},\n abstract = \t {We study the relative power of learning with gradient descent on differentiable models, such as neural networks, versus using the corresponding tangent kernels. 
We show that under certain conditions, gradient descent achieves small error only if a related tangent kernel method achieves a non-trivial advantage over random guessing (a.k.a. weak learning), though this advantage might be very small even when gradient descent can achieve arbitrarily high accuracy. Complementing this, we show that without these conditions, gradient descent can in fact learn with small error even when no kernel method, in particular using the tangent kernel, can achieve a non-trivial advantage over random guessing.}\n}", "pdf": "http://proceedings.mlr.press/v139/malach21a/malach21a.pdf", "supp": "", "pdf_size": 0, "gs_citation": 53, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2345528122854994860&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/malach21a.html" }, { "title": "Quantile Bandits for Best Arms Identification", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8699", "id": "8699", "proceeding": "http://proceedings.mlr.press/v139/zhang21o.html", "slides": "", "author_site": "Mengyan Zhang, Cheng Soon Ong", "author": "Mengyan Zhang; Cheng Soon Ong", "abstract": "We consider a variant of the best arm identification task in stochastic multi-armed bandits. Motivated by risk-averse decision-making problems, our goal is to identify a set of $m$ arms with the highest $\\tau$-quantile values within a fixed budget. We prove asymmetric two-sided concentration inequalities for order statistics and quantiles of random variables that have non-decreasing hazard rate, which may be of independent interest. With these inequalities, we analyse a quantile version of Successive Accepts and Rejects (Q-SAR). We derive an upper bound for the probability of arm misidentification, the first justification of a quantile based algorithm for fixed budget multiple best arms identification. We show illustrative experiments for best arm identification.", "bibtex": "@InProceedings{pmlr-v139-zhang21o,\n title = \t {Quantile Bandits for Best Arms Identification},\n author = {Zhang, Mengyan and Ong, Cheng Soon},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12513--12523},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21o/zhang21o.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21o.html},\n abstract = \t {We consider a variant of the best arm identification task in stochastic multi-armed bandits. Motivated by risk-averse decision-making problems, our goal is to identify a set of $m$ arms with the highest $\\tau$-quantile values within a fixed budget. We prove asymmetric two-sided concentration inequalities for order statistics and quantiles of random variables that have non-decreasing hazard rate, which may be of independent interest. With these inequalities, we analyse a quantile version of Successive Accepts and Rejects (Q-SAR). We derive an upper bound for the probability of arm misidentification, the first justification of a quantile based algorithm for fixed budget multiple best arms identification. 
We show illustrative experiments for best arm identification.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21o/zhang21o.pdf", "supp": "", "pdf_size": 6415203, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6809249853640844054&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "The Australian National University; Data61, CSIRO", "aff_domain": "anu.edu.au;anu.edu.au", "email": "anu.edu.au;anu.edu.au", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/zhang21o.html", "aff_unique_index": "0;1", "aff_unique_norm": "Australian National University;CSIRO", "aff_unique_dep": ";Data61", "aff_unique_url": "https://www.anu.edu.au;https://www.csiro.au", "aff_unique_abbr": "ANU;CSIRO", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Australia" }, { "title": "Quantitative Understanding of VAE as a Non-linearly Scaled Isometric Embedding", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8879", "id": "8879", "proceeding": "http://proceedings.mlr.press/v139/nakagawa21a.html", "slides": "/media/icml-2021/Slides/8879.pdf", "author_site": "Akira Nakagawa, Keizo Kato, Taiji Suzuki", "author": "Akira Nakagawa; Keizo Kato; Taiji Suzuki", "abstract": "Variational autoencoder (VAE) estimates the posterior parameters (mean and variance) of latent variables corresponding to each input data. While it is used for many tasks, the transparency of the model is still an underlying issue. This paper provides a quantitative understanding of VAE property through the differential geometric and information-theoretic interpretations of VAE. According to the Rate-distortion theory, the optimal transform coding is achieved by using an orthonormal transform with PCA basis where the transform space is isometric to the input. Considering the analogy of transform coding to VAE, we clarify theoretically and experimentally that VAE can be mapped to an implicit isometric embedding with a scale factor derived from the posterior parameter. As a result, we can estimate the data probabilities in the input space from the prior, loss metrics, and corresponding posterior parameters, and further, the quantitative importance of each latent variable can be evaluated like the eigenvalue of PCA.", "bibtex": "@InProceedings{pmlr-v139-nakagawa21a,\n title = \t {Quantitative Understanding of VAE as a Non-linearly Scaled Isometric Embedding},\n author = {Nakagawa, Akira and Kato, Keizo and Suzuki, Taiji},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7916--7926},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nakagawa21a/nakagawa21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/nakagawa21a.html},\n abstract = \t {Variational autoencoder (VAE) estimates the posterior parameters (mean and variance) of latent variables corresponding to each input data. While it is used for many tasks, the transparency of the model is still an underlying issue. This paper provides a quantitative understanding of VAE property through the differential geometric and information-theoretic interpretations of VAE. 
According to the Rate-distortion theory, the optimal transform coding is achieved by using an orthonormal transform with PCA basis where the transform space is isometric to the input. Considering the analogy of transform coding to VAE, we clarify theoretically and experimentally that VAE can be mapped to an implicit isometric embedding with a scale factor derived from the posterior parameter. As a result, we can estimate the data probabilities in the input space from the prior, loss metrics, and corresponding posterior parameters, and further, the quantitative importance of each latent variable can be evaluated like the eigenvalue of PCA.}\n}", "pdf": "http://proceedings.mlr.press/v139/nakagawa21a/nakagawa21a.pdf", "supp": "", "pdf_size": 2030043, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10335805510258953267&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Fujitsu Limited, Kanagawa, Japan; Graduate School of Information Science and Technology, The University of Tokyo, Tokyo, Japan+Center for Advanced Intelligence Project, RIKEN, Tokyo, Japan; Graduate School of Information Science and Technology, The University of Tokyo, Tokyo, Japan+Center for Advanced Intelligence Project, RIKEN, Tokyo, Japan", "aff_domain": "fujitsu.com; ; ", "email": "fujitsu.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/nakagawa21a.html", "aff_unique_index": "0;1+2;1+2", "aff_unique_norm": "Fujitsu Limited;University of Tokyo;RIKEN", "aff_unique_dep": ";Graduate School of Information Science and Technology;Center for Advanced Intelligence Project", "aff_unique_url": "https://www.fujitsu.com;https://www.u-tokyo.ac.jp;https://www.riken.jp", "aff_unique_abbr": "Fujitsu;UTokyo;RIKEN", "aff_campus_unique_index": "1+1;1+1", "aff_campus_unique": ";Tokyo", "aff_country_unique_index": "0;0+0;0+0", "aff_country_unique": "Japan" }, { "title": "Quantization Algorithms for Random Fourier Features", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8733", "id": "8733", "proceeding": "http://proceedings.mlr.press/v139/li21i.html", "slides": "", "author_site": "Xiaoyun Li, Ping Li", "author": "Xiaoyun Li; Ping Li", "abstract": "The method of random projection (RP) is the standard technique for dimensionality reduction, approximate near neighbor search, compressed sensing, etc., which provides a simple and effective scheme for approximating pairwise inner products and Euclidean distances in massive data. Closely related to RP, the method of random Fourier features (RFF) has also become popular for approximating the (nonlinear) Gaussian kernel. RFF applies a specific nonlinear transformation on the projected data from RP. In practice, using the Gaussian kernel often leads to better performance than the linear kernel (inner product). After random projections, quantization is an important step for efficient data storage, computation and transmission. Quantization for RP has been extensively studied in the literature. In this paper, we focus on developing quantization algorithms for RFF. The task is in a sense challenging due to the tuning parameter $\\gamma$ in the Gaussian kernel. For example, the quantizer and the quantized data might be tied to each specific Gaussian kernel parameter $\\gamma$. Our contribution begins with the analysis on the probability distributions of RFF, and an interesting discovery that the marginal distribution of RFF is free of the parameter $\\gamma$. 
This significantly simplifies the design of the Lloyd-Max (LM) quantization scheme for RFF in that there would be only one LM quantizer (regardless of $\\gamma$). Detailed theoretical analysis is provided on the kernel estimators and approximation error, and experiments confirm the effectiveness and efficiency of the proposed method.", "bibtex": "@InProceedings{pmlr-v139-li21i,\n title = \t {Quantization Algorithms for Random Fourier Features},\n author = {Li, Xiaoyun and Li, Ping},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6369--6380},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21i/li21i.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21i.html},\n abstract = \t {The method of random projection (RP) is the standard technique for dimensionality reduction, approximate near neighbor search, compressed sensing, etc., which provides a simple and effective scheme for approximating pairwise inner products and Euclidean distances in massive data. Closely related to RP, the method of random Fourier features (RFF) has also become popular for approximating the (nonlinear) Gaussian kernel. RFF applies a specific nonlinear transformation on the projected data from RP. In practice, using the Gaussian kernel often leads to better performance than the linear kernel (inner product). After random projections, quantization is an important step for efficient data storage, computation and transmission. Quantization for RP has been extensively studied in the literature. In this paper, we focus on developing quantization algorithms for RFF. The task is in a sense challenging due to the tuning parameter $\\gamma$ in the Gaussian kernel. For example, the quantizer and the quantized data might be tied to each specific Gaussian kernel parameter $\\gamma$. Our contribution begins with the analysis on the probability distributions of RFF, and an interesting discovery that the marginal distribution of RFF is free of the parameter $\\gamma$. This significantly simplifies the design of the Lloyd-Max (LM) quantization scheme for RFF in that there would be only one LM quantizer (regardless of $\\gamma$). 
Detailed theoretical analysis is provided on the kernel estimators and approximation error, and experiments confirm the effectiveness and efficiency of the proposed method.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21i/li21i.pdf", "supp": "", "pdf_size": 852681, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12070065483866348992&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Cognitive Computing Lab, Baidu Research; Cognitive Computing Lab, Baidu Research", "aff_domain": "gmail.com;gmail.com", "email": "gmail.com;gmail.com", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/li21i.html", "aff_unique_index": "0;0", "aff_unique_norm": "Baidu", "aff_unique_dep": "Cognitive Computing Lab", "aff_unique_url": "https://baidu.com", "aff_unique_abbr": "Baidu", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "China" }, { "title": "Quantum algorithms for reinforcement learning with a generative model", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10207", "id": "10207", "proceeding": "http://proceedings.mlr.press/v139/wang21w.html", "slides": "/media/icml-2021/Slides/10207.pdf", "author_site": "Daochen Wang, Aarthi Sundaram, Robin Kothari, Ashish Kapoor, Martin Roetteler", "author": "Daochen Wang; Aarthi Sundaram; Robin Kothari; Ashish Kapoor; Martin Roetteler", "abstract": "Reinforcement learning studies how an agent should interact with an environment to maximize its cumulative reward. A standard way to study this question abstractly is to ask how many samples an agent needs from the environment to learn an optimal policy for a $\\gamma$-discounted Markov decision process (MDP). For such an MDP, we design quantum algorithms that approximate an optimal policy ($\\pi^*$), the optimal value function ($v^*$), and the optimal $Q$-function ($q^*$), assuming the algorithms can access samples from the environment in quantum superposition. This assumption is justified whenever there exists a simulator for the environment; for example, if the environment is a video game or some other program. Our quantum algorithms, inspired by value iteration, achieve quadratic speedups over the best-possible classical sample complexities in the approximation accuracy ($\\epsilon$) and two main parameters of the MDP: the effective time horizon ($\\frac{1}{1-\\gamma}$) and the size of the action space ($A$). Moreover, we show that our quantum algorithm for computing $q^*$ is optimal by proving a matching quantum lower bound.", "bibtex": "@InProceedings{pmlr-v139-wang21w,\n title = \t {Quantum algorithms for reinforcement learning with a generative model},\n author = {Wang, Daochen and Sundaram, Aarthi and Kothari, Robin and Kapoor, Ashish and Roetteler, Martin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10916--10926},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21w/wang21w.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21w.html},\n abstract = \t {Reinforcement learning studies how an agent should interact with an environment to maximize its cumulative reward. 
A standard way to study this question abstractly is to ask how many samples an agent needs from the environment to learn an optimal policy for a $\\gamma$-discounted Markov decision process (MDP). For such an MDP, we design quantum algorithms that approximate an optimal policy ($\\pi^*$), the optimal value function ($v^*$), and the optimal $Q$-function ($q^*$), assuming the algorithms can access samples from the environment in quantum superposition. This assumption is justified whenever there exists a simulator for the environment; for example, if the environment is a video game or some other program. Our quantum algorithms, inspired by value iteration, achieve quadratic speedups over the best-possible classical sample complexities in the approximation accuracy ($\\epsilon$) and two main parameters of the MDP: the effective time horizon ($\\frac{1}{1-\\gamma}$) and the size of the action space ($A$). Moreover, we show that our quantum algorithm for computing $q^*$ is optimal by proving a matching quantum lower bound.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21w/wang21w.pdf", "supp": "", "pdf_size": 414994, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12274927026480667054&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "University of Maryland; Microsoft Quantum; Microsoft Quantum; Microsoft; Microsoft Quantum", "aff_domain": "gmail.com;microsoft.com; ; ; ", "email": "gmail.com;microsoft.com; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/wang21w.html", "aff_unique_index": "0;1;1;1;1", "aff_unique_norm": "University of Maryland;Microsoft", "aff_unique_dep": ";Microsoft Quantum", "aff_unique_url": "https://www/umd.edu;https://www.microsoft.com", "aff_unique_abbr": "UMD;Microsoft", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Quasi-global Momentum: Accelerating Decentralized Deep Learning on Heterogeneous Data", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10319", "id": "10319", "proceeding": "http://proceedings.mlr.press/v139/lin21c.html", "slides": "", "author_site": "Tao Lin, Sai Praneeth Reddy Karimireddy, Sebastian Stich, Martin Jaggi", "author": "Tao Lin; Sai Praneeth Karimireddy; Sebastian Stich; Martin Jaggi", "abstract": "Decentralized training of deep learning models is a key element for enabling data privacy and on-device learning over networks. In realistic learning scenarios, the presence of heterogeneity across different clients\u2019 local datasets poses an optimization challenge and may severely deteriorate the generalization performance. In this paper, we investigate and identify the limitation of several decentralized optimization algorithms for different degrees of data heterogeneity. We propose a novel momentum-based method to mitigate this decentralized training difficulty. 
We show in extensive empirical experiments on various CV/NLP datasets (CIFAR-10, ImageNet, and AG News) and several network topologies (Ring and Social Network) that our method is much more robust to the heterogeneity of clients\u2019 data than other existing methods, by a significant improvement in test performance (1%-20%).", "bibtex": "@InProceedings{pmlr-v139-lin21c,\n title = \t {Quasi-global Momentum: Accelerating Decentralized Deep Learning on Heterogeneous Data},\n author = {Lin, Tao and Karimireddy, Sai Praneeth and Stich, Sebastian and Jaggi, Martin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6654--6665},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lin21c/lin21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/lin21c.html},\n abstract = \t {Decentralized training of deep learning models is a key element for enabling data privacy and on-device learning over networks. In realistic learning scenarios, the presence of heterogeneity across different clients\u2019 local datasets poses an optimization challenge and may severely deteriorate the generalization performance. In this paper, we investigate and identify the limitation of several decentralized optimization algorithms for different degrees of data heterogeneity. We propose a novel momentum-based method to mitigate this decentralized training difficulty. We show in extensive empirical experiments on various CV/NLP datasets (CIFAR-10, ImageNet, and AG News) and several network topologies (Ring and Social Network) that our method is much more robust to the heterogeneity of clients\u2019 data than other existing methods, by a significant improvement in test performance (1%-20%).}\n}", "pdf": "http://proceedings.mlr.press/v139/lin21c/lin21c.pdf", "supp": "", "pdf_size": 9267945, "gs_citation": 112, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11090813795485624273&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "EPFL, Lausanne, Switzerland; EPFL, Lausanne, Switzerland; EPFL, Lausanne, Switzerland; EPFL, Lausanne, Switzerland", "aff_domain": "epfl.ch; ; ; ", "email": "epfl.ch; ; ; ", "github": "github.com/epfml/quasi-global-momentum", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/lin21c.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "EPFL", "aff_unique_dep": "", "aff_unique_url": "https://www.epfl.ch", "aff_unique_abbr": "EPFL", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Lausanne", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Switzerland" }, { "title": "Query Complexity of Adversarial Attacks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9353", "id": "9353", "proceeding": "http://proceedings.mlr.press/v139/gluch21a.html", "slides": "", "author_site": "Grzegorz Gluch, R\u00fcdiger Urbanke", "author": "Grzegorz Gluch; R\u00fcdiger Urbanke", "abstract": "There are two main attack models considered in the adversarial robustness literature: black-box and white-box. We consider these threat models as two ends of a fine-grained spectrum, indexed by the number of queries the adversary can ask. 
Using this point of view we investigate how many queries the adversary needs to make to design an attack that is comparable to the best possible attack in the white-box model. We give a lower bound on that number of queries in terms of entropy of decision boundaries of the classifier. Using this result we analyze two classical learning algorithms on two synthetic tasks for which we prove meaningful security guarantees. The obtained bounds suggest that some learning algorithms are inherently more robust against query-bounded adversaries than others.", "bibtex": "@InProceedings{pmlr-v139-gluch21a,\n title = \t {Query Complexity of Adversarial Attacks},\n author = {Gluch, Grzegorz and Urbanke, R{\\\"u}diger},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3723--3733},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gluch21a/gluch21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/gluch21a.html},\n abstract = \t {There are two main attack models considered in the adversarial robustness literature: black-box and white-box. We consider these threat models as two ends of a fine-grained spectrum, indexed by the number of queries the adversary can ask. Using this point of view we investigate how many queries the adversary needs to make to design an attack that is comparable to the best possible attack in the white-box model. We give a lower bound on that number of queries in terms of entropy of decision boundaries of the classifier. Using this result we analyze two classical learning algorithms on two synthetic tasks for which we prove meaningful security guarantees. The obtained bounds suggest that some learning algorithms are inherently more robust against query-bounded adversaries than others.}\n}", "pdf": "http://proceedings.mlr.press/v139/gluch21a/gluch21a.pdf", "supp": "", "pdf_size": 376190, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17542016952681777919&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "School of Computer and Communication Sciences, EPFL, Switzerland; School of Computer and Communication Sciences, EPFL, Switzerland", "aff_domain": "epfl.ch; ", "email": "epfl.ch; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/gluch21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "EPFL", "aff_unique_dep": "School of Computer and Communication Sciences", "aff_unique_url": "https://www.epfl.ch", "aff_unique_abbr": "EPFL", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Switzerland" }, { "title": "RATT: Leveraging Unlabeled Data to Guarantee Generalization", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10203", "id": "10203", "proceeding": "http://proceedings.mlr.press/v139/garg21a.html", "slides": "", "author_site": "Saurabh Garg, Sivaraman Balakrishnan, Zico Kolter, Zachary Lipton", "author": "Saurabh Garg; Sivaraman Balakrishnan; Zico Kolter; Zachary Lipton", "abstract": "To assess generalization, machine learning scientists typically either (i) bound the generalization gap and then (after training) plug in the empirical risk to obtain a bound on the true risk; or (ii) validate empirically on holdout data. 
However, (i) typically yields vacuous guarantees for overparameterized models; and (ii) shrinks the training set and its guarantee erodes with each re-use of the holdout set. In this paper, we leverage unlabeled data to produce generalization bounds. After augmenting our (labeled) training set with randomly labeled data, we train in the standard fashion. Whenever classifiers achieve low error on the clean data but high error on the random data, our bound ensures that the true risk is low. We prove that our bound is valid for 0-1 empirical risk minimization and with linear classifiers trained by gradient descent. Our approach is especially useful in conjunction with deep learning due to the early learning phenomenon whereby networks fit true labels before noisy labels but requires one intuitive assumption. Empirically, on canonical computer vision and NLP tasks, our bound provides non-vacuous generalization guarantees that track actual performance closely. This work enables practitioners to certify generalization even when (labeled) holdout data is unavailable and provides insights into the relationship between random label noise and generalization.", "bibtex": "@InProceedings{pmlr-v139-garg21a,\n title = \t {RATT: Leveraging Unlabeled Data to Guarantee Generalization},\n author = {Garg, Saurabh and Balakrishnan, Sivaraman and Kolter, Zico and Lipton, Zachary},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3598--3609},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/garg21a/garg21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/garg21a.html},\n abstract = \t {To assess generalization, machine learning scientists typically either (i) bound the generalization gap and then (after training) plug in the empirical risk to obtain a bound on the true risk; or (ii) validate empirically on holdout data. However, (i) typically yields vacuous guarantees for overparameterized models; and (ii) shrinks the training set and its guarantee erodes with each re-use of the holdout set. In this paper, we leverage unlabeled data to produce generalization bounds. After augmenting our (labeled) training set with randomly labeled data, we train in the standard fashion. Whenever classifiers achieve low error on the clean data but high error on the random data, our bound ensures that the true risk is low. We prove that our bound is valid for 0-1 empirical risk minimization and with linear classifiers trained by gradient descent. Our approach is especially useful in conjunction with deep learning due to the early learning phenomenon whereby networks fit true labels before noisy labels but requires one intuitive assumption. Empirically, on canonical computer vision and NLP tasks, our bound provides non-vacuous generalization guarantees that track actual performance closely. 
This work enables practitioners to certify generalization even when (labeled) holdout data is unavailable and provides insights into the relationship between random label noise and generalization.}\n}", "pdf": "http://proceedings.mlr.press/v139/garg21a/garg21a.pdf", "supp": "", "pdf_size": 418405, "gs_citation": 34, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5614969385611278866&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Machine Learning Department, Carnegie Mellon University; Department of Statistics and Data Science, Carnegie Mellon University + Computer Science Department, Carnegie Mellon University; Computer Science Department, Carnegie Mellon University; Machine Learning Department, Carnegie Mellon University", "aff_domain": "andrew.cmu.edu; ; ; ", "email": "andrew.cmu.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/garg21a.html", "aff_unique_index": "0;0+0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "Machine Learning Department", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0+0;0;0", "aff_country_unique": "United States" }, { "title": "REPAINT: Knowledge Transfer in Deep Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8839", "id": "8839", "proceeding": "http://proceedings.mlr.press/v139/tao21a.html", "slides": "/media/icml-2021/Slides/8839.pdf", "author_site": "Yunzhe Tao, Sahika Genc, Jonathan Chung, TAO SUN, Sunil Mallya", "author": "Yunzhe Tao; Sahika Genc; Jonathan Chung; Tao Sun; Sunil Mallya", "abstract": "Accelerating learning processes for complex tasks by leveraging previously learned tasks has been one of the most challenging problems in reinforcement learning, especially when the similarity between source and target tasks is low. This work proposes REPresentation And INstance Transfer (REPAINT) algorithm for knowledge transfer in deep reinforcement learning. REPAINT not only transfers the representation of a pre-trained teacher policy in the on-policy learning, but also uses an advantage-based experience selection approach to transfer useful samples collected following the teacher policy in the off-policy learning. Our experimental results on several benchmark tasks show that REPAINT significantly reduces the total training time in generic cases of task similarity. 
In particular, when the source tasks are dissimilar to, or sub-tasks of, the target tasks, REPAINT outperforms other baselines in both training-time reduction and asymptotic performance of return scores.", "bibtex": "@InProceedings{pmlr-v139-tao21a,\n title = \t {REPAINT: Knowledge Transfer in Deep Reinforcement Learning},\n author = {Tao, Yunzhe and Genc, Sahika and Chung, Jonathan and Sun, Tao and Mallya, Sunil},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10141--10152},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tao21a/tao21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/tao21a.html},\n abstract = \t {Accelerating learning processes for complex tasks by leveraging previously learned tasks has been one of the most challenging problems in reinforcement learning, especially when the similarity between source and target tasks is low. This work proposes REPresentation And INstance Transfer (REPAINT) algorithm for knowledge transfer in deep reinforcement learning. REPAINT not only transfers the representation of a pre-trained teacher policy in the on-policy learning, but also uses an advantage-based experience selection approach to transfer useful samples collected following the teacher policy in the off-policy learning. Our experimental results on several benchmark tasks show that REPAINT significantly reduces the total training time in generic cases of task similarity. In particular, when the source tasks are dissimilar to, or sub-tasks of, the target tasks, REPAINT outperforms other baselines in both training-time reduction and asymptotic performance of return scores.}\n}", "pdf": "http://proceedings.mlr.press/v139/tao21a/tao21a.pdf", "supp": "", "pdf_size": 5861972, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7953830827004525978&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "AI Labs, Amazon Web Services, Seattle, WA 98121, USA; AI Labs, Amazon Web Services, Seattle, WA 98121, USA; AI Labs, Amazon Web Services, Seattle, WA 98121, USA; AI Labs, Amazon Web Services, Seattle, WA 98121, USA; AI Labs, Amazon Web Services, Seattle, WA 98121, USA", "aff_domain": "gmail.com; ; ; ; ", "email": "gmail.com; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/tao21a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Amazon", "aff_unique_dep": "AI Labs", "aff_unique_url": "https://aws.amazon.com", "aff_unique_abbr": "AWS", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Seattle", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "RNN with Particle Flow for Probabilistic Spatio-temporal Forecasting", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9195", "id": "9195", "proceeding": "http://proceedings.mlr.press/v139/pal21b.html", "slides": "/media/icml-2021/Slides/9195.pdf", "author_site": "Soumyasundar Pal, Liheng Ma, Yingxue Zhang, Mark Coates", "author": "Soumyasundar Pal; Liheng Ma; Yingxue Zhang; Mark Coates", "abstract": "Spatio-temporal forecasting has numerous applications in analyzing wireless, traffic, and financial networks. 
Many classical statistical models often fall short in handling the complexity and high non-linearity present in time-series data. Recent advances in deep learning allow for better modelling of spatial and temporal dependencies. While most of these models focus on obtaining accurate point forecasts, they do not characterize the prediction uncertainty. In this work, we consider the time-series data as a random realization from a nonlinear state-space model and target Bayesian inference of the hidden states for probabilistic forecasting. We use particle flow as the tool for approximating the posterior distribution of the states, as it is shown to be highly effective in complex, high-dimensional settings. Thorough experimentation on several real world time-series datasets demonstrates that our approach provides better characterization of uncertainty while maintaining comparable accuracy to the state-of-the-art point forecasting methods.", "bibtex": "@InProceedings{pmlr-v139-pal21b,\n title = \t {RNN with Particle Flow for Probabilistic Spatio-temporal Forecasting},\n author = {Pal, Soumyasundar and Ma, Liheng and Zhang, Yingxue and Coates, Mark},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8336--8348},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/pal21b/pal21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/pal21b.html},\n abstract = \t {Spatio-temporal forecasting has numerous applications in analyzing wireless, traffic, and financial networks. Many classical statistical models often fall short in handling the complexity and high non-linearity present in time-series data. Recent advances in deep learning allow for better modelling of spatial and temporal dependencies. While most of these models focus on obtaining accurate point forecasts, they do not characterize the prediction uncertainty. In this work, we consider the time-series data as a random realization from a nonlinear state-space model and target Bayesian inference of the hidden states for probabilistic forecasting. We use particle flow as the tool for approximating the posterior distribution of the states, as it is shown to be highly effective in complex, high-dimensional settings. 
Thorough experimentation on several real world time-series datasets demonstrates that our approach provides better characterization of uncertainty while maintaining comparable accuracy to the state-of-the-art point forecasting methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/pal21b/pal21b.pdf", "supp": "", "pdf_size": 1024061, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16256105255072962985&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Department of Electrical and Computer Engineering, McGill University, Montr\u00e9al, QC, Canada\u2020; Department of Electrical and Computer Engineering, McGill University, Montr\u00e9al, QC, Canada\u2020; Huawei Noah\u2019s Ark Lab, Montr\u00e9al Research Center, Montr\u00e9al, QC, Canada; Department of Electrical and Computer Engineering, McGill University, Montr\u00e9al, QC, Canada\u2020", "aff_domain": "mail.mcgill.ca; ; ; ", "email": "mail.mcgill.ca; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/pal21b.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "McGill University;Huawei", "aff_unique_dep": "Department of Electrical and Computer Engineering;Montr\u00e9al Research Center", "aff_unique_url": "https://www.mcgill.ca;https://www.huawei.com", "aff_unique_abbr": "McGill;Huawei", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Montr\u00e9al", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Canada" }, { "title": "RNNRepair: Automatic RNN Repair via Model-based Analysis", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8765", "id": "8765", "proceeding": "http://proceedings.mlr.press/v139/xie21b.html", "slides": "", "author_site": "Xiaofei Xie, Wenbo Guo, Lei Ma, Wei Le, Jian Wang, Lingjun Zhou, Yang Liu, Xinyu Xing", "author": "Xiaofei Xie; Wenbo Guo; Lei Ma; Wei Le; Jian Wang; Lingjun Zhou; Yang Liu; Xinyu Xing", "abstract": "Deep neural networks are vulnerable to adversarial attacks. Due to their black-box nature, it is rather challenging to interpret and properly repair these incorrect behaviors. This paper focuses on interpreting and repairing the incorrect behaviors of Recurrent Neural Networks (RNNs). We propose a lightweight model-based approach (RNNRepair) to help understand and repair incorrect behaviors of an RNN. Specifically, we build an influence model to characterize the stateful and statistical behaviors of an RNN over all the training data and to perform the influence analysis for the errors. Compared with the existing techniques on influence function, our method can efficiently estimate the influence of existing or newly added training samples for a given prediction at both sample level and segmentation level. Our empirical evaluation shows that the proposed influence model is able to extract accurate and understandable features. Based on the influence model, our proposed technique could effectively infer the influential instances from not only an entire testing sequence but also a segment within that sequence. 
Moreover, with the sample-level and segment-level influence relations, RNNRepair could further remediate two types of incorrect predictions at the sample level and segment level.", "bibtex": "@InProceedings{pmlr-v139-xie21b,\n title = \t {RNNRepair: Automatic RNN Repair via Model-based Analysis},\n author = {Xie, Xiaofei and Guo, Wenbo and Ma, Lei and Le, Wei and Wang, Jian and Zhou, Lingjun and Liu, Yang and Xing, Xinyu},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11383--11392},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xie21b/xie21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/xie21b.html},\n abstract = \t {Deep neural networks are vulnerable to adversarial attacks. Due to their black-box nature, it is rather challenging to interpret and properly repair these incorrect behaviors. This paper focuses on interpreting and repairing the incorrect behaviors of Recurrent Neural Networks (RNNs). We propose a lightweight model-based approach (RNNRepair) to help understand and repair incorrect behaviors of an RNN. Specifically, we build an influence model to characterize the stateful and statistical behaviors of an RNN over all the training data and to perform the influence analysis for the errors. Compared with the existing techniques on influence function, our method can efficiently estimate the influence of existing or newly added training samples for a given prediction at both sample level and segmentation level. Our empirical evaluation shows that the proposed influence model is able to extract accurate and understandable features. Based on the influence model, our proposed technique could effectively infer the influential instances from not only an entire testing sequence but also a segment within that sequence. 
Moreover, with the sample-level and segment-level influence relations, RNNRepair could further remediate two types of incorrect predictions at the sample level and segment level.}\n}", "pdf": "http://proceedings.mlr.press/v139/xie21b/xie21b.pdf", "supp": "", "pdf_size": 811894, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4965812938953979776&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Nanyang Technological University, Singapore+Kyushu University, Japan; College of Information Sciences and Technology, The Pennsylvania State University, State College, PA, USA; University of Alberta, Canada+Alberta Machine Intelligence Institute, Canada; Iowa State University, USA; Nanyang Technological University, Singapore; Tianjin University, China; College of Information Sciences and Technology, The Pennsylvania State University, State College, PA, USA; Nanyang Technological University, Singapore", "aff_domain": "gmail.com; ; ; ; ; ; ; ", "email": "gmail.com; ; ; ; ; ; ; ", "github": "", "project": "https://bitbucket.org/xiaofeixie/rnnrepair", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/xie21b.html", "aff_unique_index": "0+1;2;3+4;5;0;6;2;0", "aff_unique_norm": "Nanyang Technological University;Kyushu University;Pennsylvania State University;University of Alberta;Alberta Machine Intelligence Institute;Iowa State University;Tianjin University", "aff_unique_dep": ";;College of Information Sciences and Technology;;;;", "aff_unique_url": "https://www.ntu.edu.sg;https://www.kyushu-u.ac.jp;https://www.psu.edu;https://www.ualberta.ca;https://www.ami.ualberta.ca/;https://www.iastate.edu;http://www.tju.edu.cn", "aff_unique_abbr": "NTU;Kyushu U;PSU;UAlberta;AMII;ISU;Tianjin U", "aff_campus_unique_index": ";1;;1", "aff_campus_unique": ";State College", "aff_country_unique_index": "0+1;2;3+3;2;0;4;2;0", "aff_country_unique": "Singapore;Japan;United States;Canada;China" }, { "title": "RRL: Resnet as representation for Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10161", "id": "10161", "proceeding": "http://proceedings.mlr.press/v139/shah21a.html", "slides": "", "author_site": "Rutav Shah, Vikash Kumar", "author": "Rutav M Shah; Vikash Kumar", "abstract": "The ability to autonomously learn behaviors via direct interactions in uninstrumented environments can lead to generalist robots capable of enhancing productivity or providing care in unstructured settings like homes. Such uninstrumented settings warrant operations only using the robot\u2019s proprioceptive sensor such as onboard cameras, joint encoders, etc which can be challenging for policy learning owing to the high dimensionality and partial observability issues. We propose RRL: Resnet as representation for Reinforcement Learning {\u2013} a straightforward yet effective approach that can learn complex behaviors directly from proprioceptive inputs. RRL fuses features extracted from pre-trained Resnet into the standard reinforcement learning pipeline and delivers results comparable to learning directly from the state. In a simulated dexterous manipulation benchmark, where the state of the art methods fails to make significant progress, RRL delivers contact rich behaviors. The appeal of RRL lies in its simplicity in bringing together progress from the fields of Representation Learning, Imitation Learning, and Reinforcement Learning. 
Its effectiveness in learning behaviors directly from visual inputs with performance and sample efficiency matching learning directly from the state, even in complex high dimensional domains, is far from obvious.", "bibtex": "@InProceedings{pmlr-v139-shah21a,\n title = \t {RRL: Resnet as representation for Reinforcement Learning},\n author = {Shah, Rutav M and Kumar, Vikash},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9465--9476},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/shah21a/shah21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/shah21a.html},\n abstract = \t {The ability to autonomously learn behaviors via direct interactions in uninstrumented environments can lead to generalist robots capable of enhancing productivity or providing care in unstructured settings like homes. Such uninstrumented settings warrant operations only using the robot\u2019s proprioceptive sensor such as onboard cameras, joint encoders, etc which can be challenging for policy learning owing to the high dimensionality and partial observability issues. We propose RRL: Resnet as representation for Reinforcement Learning {\u2013} a straightforward yet effective approach that can learn complex behaviors directly from proprioceptive inputs. RRL fuses features extracted from pre-trained Resnet into the standard reinforcement learning pipeline and delivers results comparable to learning directly from the state. In a simulated dexterous manipulation benchmark, where the state of the art methods fails to make significant progress, RRL delivers contact rich behaviors. The appeal of RRL lies in its simplicity in bringing together progress from the fields of Representation Learning, Imitation Learning, and Reinforcement Learning. 
Its effectiveness in learning behaviors directly from visual inputs with performance and sample efficiency matching learning directly from the state, even in complex high dimensional domains, is far from obvious.}\n}", "pdf": "http://proceedings.mlr.press/v139/shah21a/shah21a.pdf", "supp": "", "pdf_size": 7425066, "gs_citation": 129, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2882214576859526134&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science and Engineering, Indian Institute of Technology, Kharagpur, India; Department of Computer Science, University of Washington, Seattle, USA", "aff_domain": "gmail.com;cs.washington.edu", "email": "gmail.com;cs.washington.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/shah21a.html", "aff_unique_index": "0;1", "aff_unique_norm": "Indian Institute of Technology Kharagpur;University of Washington", "aff_unique_dep": "Department of Computer Science and Engineering;Department of Computer Science", "aff_unique_url": "https://www.iitkgp.ac.in;https://www.washington.edu", "aff_unique_abbr": "IIT Kharagpur;UW", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Kharagpur;Seattle", "aff_country_unique_index": "0;1", "aff_country_unique": "India;United States" }, { "title": "Randomized Algorithms for Submodular Function Maximization with a $k$-System Constraint", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9387", "id": "9387", "proceeding": "http://proceedings.mlr.press/v139/cui21b.html", "slides": "/media/icml-2021/Slides/9387.pdf", "author_site": "Shuang Cui, Kai Han, Tianshuai Zhu, Jing Tang, Benwei Wu, He Huang", "author": "Shuang Cui; Kai Han; Tianshuai Zhu; Jing Tang; Benwei Wu; He Huang", "abstract": "Submodular optimization has numerous applications such as crowdsourcing and viral marketing. In this paper, we study the problem of non-negative submodular function maximization subject to a $k$-system constraint, which generalizes many other important constraints in submodular optimization such as cardinality constraint, matroid constraint, and $k$-extendible system constraint. The existing approaches for this problem are all based on deterministic algorithmic frameworks, and the best approximation ratio achieved by these algorithms (for a general submodular function) is $k+2\\sqrt{k+2}+3$. We propose a randomized algorithm with an improved approximation ratio of $(1+\\sqrt{k})^2$, while achieving nearly-linear time complexity significantly lower than that of the state-of-the-art algorithm. We also show that our algorithm can be further generalized to address a stochastic case where the elements can be adaptively selected, and propose an approximation ratio of $(1+\\sqrt{k+1})^2$ for the adaptive optimization case. 
The empirical performance of our algorithms is extensively evaluated in several applications related to data mining and social computing, and the experimental results demonstrate the superiorities of our algorithms in terms of both utility and efficiency.", "bibtex": "@InProceedings{pmlr-v139-cui21b,\n title = \t {Randomized Algorithms for Submodular Function Maximization with a $k$-System Constraint},\n author = {Cui, Shuang and Han, Kai and Zhu, Tianshuai and Tang, Jing and Wu, Benwei and Huang, He},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2222--2232},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cui21b/cui21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/cui21b.html},\n abstract = \t {Submodular optimization has numerous applications such as crowdsourcing and viral marketing. In this paper, we study the problem of non-negative submodular function maximization subject to a $k$-system constraint, which generalizes many other important constraints in submodular optimization such as cardinality constraint, matroid constraint, and $k$-extendible system constraint. The existing approaches for this problem are all based on deterministic algorithmic frameworks, and the best approximation ratio achieved by these algorithms (for a general submodular function) is $k+2\\sqrt{k+2}+3$. We propose a randomized algorithm with an improved approximation ratio of $(1+\\sqrt{k})^2$, while achieving nearly-linear time complexity significantly lower than that of the state-of-the-art algorithm. We also show that our algorithm can be further generalized to address a stochastic case where the elements can be adaptively selected, and propose an approximation ratio of $(1+\\sqrt{k+1})^2$ for the adaptive optimization case. 
The empirical performance of our algorithms is extensively evaluated in several applications related to data mining and social computing, and the experimental results demonstrate the superiorities of our algorithms in terms of both utility and efficiency.}\n}", "pdf": "http://proceedings.mlr.press/v139/cui21b/cui21b.pdf", "supp": "", "pdf_size": 403899, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6882118348427894448&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "School of Computer Science and Technology / SuZhou Research Institute, University of Science and Technology of China; School of Computer Science and Technology / SuZhou Research Institute, University of Science and Technology of China; School of Computer Science and Technology / SuZhou Research Institute, University of Science and Technology of China; Data Science and Analytics Thrust, The Hong Kong University of Science and Technology; School of Computer Science and Technology / SuZhou Research Institute, University of Science and Technology of China; School of Computer Science and Technology, Soochow University", "aff_domain": "ustc.edu.cn; ; ; ; ; ", "email": "ustc.edu.cn; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/cui21b.html", "aff_unique_index": "0;0;0;1;0;2", "aff_unique_norm": "University of Science and Technology of China;Hong Kong University of Science and Technology;Soochow University", "aff_unique_dep": "School of Computer Science and Technology;Data Science and Analytics Thrust;School of Computer Science and Technology", "aff_unique_url": "http://www.ustc.edu.cn;https://www.ust.hk;https://eng.suda.edu.cn/", "aff_unique_abbr": "USTC;HKUST;Soochow U", "aff_campus_unique_index": "0;0;0;1;0", "aff_campus_unique": "SuZhou;Hong Kong SAR;", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "China" }, { "title": "Randomized Dimensionality Reduction for Facility Location and Single-Linkage Clustering", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9975", "id": "9975", "proceeding": "http://proceedings.mlr.press/v139/narayanan21b.html", "slides": "", "author_site": "Shyam Narayanan, Sandeep Silwal, Piotr Indyk, Or Zamir", "author": "Shyam Narayanan; Sandeep Silwal; Piotr Indyk; Or Zamir", "abstract": "Random dimensionality reduction is a versatile tool for speeding up algorithms for high-dimensional problems. We study its application to two clustering problems: the facility location problem, and the single-linkage hierarchical clustering problem, which is equivalent to computing the minimum spanning tree. We show that if we project the input pointset $X$ onto a random $d = O(d_X)$-dimensional subspace (where $d_X$ is the doubling dimension of $X$), then the optimum facility location cost in the projected space approximates the original cost up to a constant factor. We show an analogous statement for minimum spanning tree, but with the dimension $d$ having an extra $\\log \\log n$ term and the approximation factor being arbitrarily close to $1$. Furthermore, we extend these results to approximating {\\em solutions} instead of just their {\\em costs}. Lastly, we provide experimental results to validate the quality of solutions and the speedup due to the dimensionality reduction. 
Unlike several previous papers studying this approach in the context of $k$-means and $k$-medians, our dimension bound does not depend on the number of clusters but only on the intrinsic dimensionality of $X$.", "bibtex": "@InProceedings{pmlr-v139-narayanan21b,\n title = \t {Randomized Dimensionality Reduction for Facility Location and Single-Linkage Clustering},\n author = {Narayanan, Shyam and Silwal, Sandeep and Indyk, Piotr and Zamir, Or},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7948--7957},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/narayanan21b/narayanan21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/narayanan21b.html},\n abstract = \t {Random dimensionality reduction is a versatile tool for speeding up algorithms for high-dimensional problems. We study its application to two clustering problems: the facility location problem, and the single-linkage hierarchical clustering problem, which is equivalent to computing the minimum spanning tree. We show that if we project the input pointset $X$ onto a random $d = O(d_X)$-dimensional subspace (where $d_X$ is the doubling dimension of $X$), then the optimum facility location cost in the projected space approximates the original cost up to a constant factor. We show an analogous statement for minimum spanning tree, but with the dimension $d$ having an extra $\\log \\log n$ term and the approximation factor being arbitrarily close to $1$. Furthermore, we extend these results to approximating {\\em solutions} instead of just their {\\em costs}. Lastly, we provide experimental results to validate the quality of solutions and the speedup due to the dimensionality reduction. 
Unlike several previous papers studying this approach in the context of $k$-means and $k$-medians, our dimension bound does not depend on the number of clusters but only on the intrinsic dimensionality of $X$.}\n}", "pdf": "http://proceedings.mlr.press/v139/narayanan21b/narayanan21b.pdf", "supp": "", "pdf_size": 365684, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7749845481114533777&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Electrical Engineering and Computer Science Department, Massachusetts Institute of Technology, Cambridge, MA, USA; Electrical Engineering and Computer Science Department, Massachusetts Institute of Technology, Cambridge, MA, USA; Electrical Engineering and Computer Science Department, Massachusetts Institute of Technology, Cambridge, MA, USA; Institute of Advanced Study, Princeton, NJ, USA", "aff_domain": "mit.edu;mit.edu;mit.edu;ias.edu", "email": "mit.edu;mit.edu;mit.edu;ias.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/narayanan21b.html", "aff_unique_index": "0;0;0;1", "aff_unique_norm": "Massachusetts Institute of Technology;Institute of Advanced Study", "aff_unique_dep": "Electrical Engineering and Computer Science Department;", "aff_unique_url": "https://web.mit.edu;https://ias.edu", "aff_unique_abbr": "MIT;IAS", "aff_campus_unique_index": "0;0;0;1", "aff_campus_unique": "Cambridge;Princeton", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Randomized Entity-wise Factorization for Multi-Agent Reinforcement Learning", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10527", "id": "10527", "proceeding": "http://proceedings.mlr.press/v139/iqbal21a.html", "slides": "", "author_site": "Shariq Iqbal, Christian Schroeder, Bei Peng, Wendelin Boehmer, Shimon Whiteson, Fei Sha", "author": "Shariq Iqbal; Christian A Schroeder De Witt; Bei Peng; Wendelin Boehmer; Shimon Whiteson; Fei Sha", "abstract": "Multi-agent settings in the real world often involve tasks with varying types and quantities of agents and non-agent entities; however, common patterns of behavior often emerge among these agents/entities. Our method aims to leverage these commonalities by asking the question: \u201cWhat is the expected utility of each agent when only considering a randomly selected sub-group of its observed entities?\u201d By posing this counterfactual question, we can recognize state-action trajectories within sub-groups of entities that we may have encountered in another task and use what we learned in that task to inform our prediction in the current one. We then reconstruct a prediction of the full returns as a combination of factors considering these disjoint groups of entities and train this \u201crandomly factorized\" value function as an auxiliary objective for value-based multi-agent reinforcement learning. By doing so, our model can recognize and leverage similarities across tasks to improve learning efficiency in a multi-task setting. 
Our approach, Randomized Entity-wise Factorization for Imagined Learning (REFIL), outperforms all strong baselines by a significant margin in challenging multi-task StarCraft micromanagement settings.", "bibtex": "@InProceedings{pmlr-v139-iqbal21a,\n title = \t {Randomized Entity-wise Factorization for Multi-Agent Reinforcement Learning},\n author = {Iqbal, Shariq and De Witt, Christian A Schroeder and Peng, Bei and Boehmer, Wendelin and Whiteson, Shimon and Sha, Fei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4596--4606},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/iqbal21a/iqbal21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/iqbal21a.html},\n abstract = \t {Multi-agent settings in the real world often involve tasks with varying types and quantities of agents and non-agent entities; however, common patterns of behavior often emerge among these agents/entities. Our method aims to leverage these commonalities by asking the question: \u201cWhat is the expected utility of each agent when only considering a randomly selected sub-group of its observed entities?\u201d By posing this counterfactual question, we can recognize state-action trajectories within sub-groups of entities that we may have encountered in another task and use what we learned in that task to inform our prediction in the current one. We then reconstruct a prediction of the full returns as a combination of factors considering these disjoint groups of entities and train this \u201crandomly factorized\" value function as an auxiliary objective for value-based multi-agent reinforcement learning. By doing so, our model can recognize and leverage similarities across tasks to improve learning efficiency in a multi-task setting. 
Our approach, Randomized Entity-wise Factorization for Imagined Learning (REFIL), outperforms all strong baselines by a significant margin in challenging multi-task StarCraft micromanagement settings.}\n}", "pdf": "http://proceedings.mlr.press/v139/iqbal21a/iqbal21a.pdf", "supp": "", "pdf_size": 3234585, "gs_citation": 83, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4592647130622480373&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Department of Computer Science, University of Southern California; Department of Computer Science, University of Oxford; Department of Computer Science, University of Oxford; Department of Software Technology, Delft University of Technology; Department of Computer Science, University of Oxford; Department of Computer Science, University of Southern California + Google Research", "aff_domain": "usc.edu; ; ; ; ; ", "email": "usc.edu; ; ; ; ; ", "github": "https://github.com/shariqiqbal2810/REFIL", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/iqbal21a.html", "aff_unique_index": "0;1;1;2;1;0+3", "aff_unique_norm": "University of Southern California;University of Oxford;Delft University of Technology;Google", "aff_unique_dep": "Department of Computer Science;Department of Computer Science;Department of Software Technology;Google Research", "aff_unique_url": "https://www.usc.edu;https://www.ox.ac.uk;https://www.tudelft.nl;https://research.google", "aff_unique_abbr": "USC;Oxford;TUDelft;Google Research", "aff_campus_unique_index": "0;1;1;2;1;0+3", "aff_campus_unique": "Los Angeles;Oxford;Delft;Mountain View", "aff_country_unique_index": "0;1;1;2;1;0+0", "aff_country_unique": "United States;United Kingdom;Netherlands" }, { "title": "Randomized Exploration in Reinforcement Learning with General Value Function Approximation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9803", "id": "9803", "proceeding": "http://proceedings.mlr.press/v139/ishfaq21a.html", "slides": "/media/icml-2021/Slides/9803.pdf", "author_site": "Haque Ishfaq, Qiwen Cui, Viet Nguyen, Alex Ayoub, Zhuoran Yang, Zhaoran Wang, Doina Precup, Lin Yang", "author": "Haque Ishfaq; Qiwen Cui; Viet Nguyen; Alex Ayoub; Zhuoran Yang; Zhaoran Wang; Doina Precup; Lin Yang", "abstract": "We propose a model-free reinforcement learning algorithm inspired by the popular randomized least squares value iteration (RLSVI) algorithm as well as the optimism principle. Unlike existing upper-confidence-bound (UCB) based approaches, which are often computationally intractable, our algorithm drives exploration by simply perturbing the training data with judiciously chosen i.i.d. scalar noises. To attain optimistic value function estimation without resorting to a UCB-style bonus, we introduce an optimistic reward sampling procedure. When the value functions can be represented by a function class $\\mathcal{F}$, our algorithm achieves a worst-case regret bound of $\\tilde{O}(\\mathrm{poly}(d_EH)\\sqrt{T})$ where $T$ is the time elapsed, $H$ is the planning horizon and $d_E$ is the \\emph{eluder dimension} of $\\mathcal{F}$. In the linear setting, our algorithm reduces to LSVI-PHE, a variant of RLSVI, that enjoys an $\\tilde{\\mathcal{O}}(\\sqrt{d^3H^3T})$ regret. 
We complement the theory with an empirical evaluation across known difficult exploration tasks.", "bibtex": "@InProceedings{pmlr-v139-ishfaq21a,\n title = \t {Randomized Exploration in Reinforcement Learning with General Value Function Approximation},\n author = {Ishfaq, Haque and Cui, Qiwen and Nguyen, Viet and Ayoub, Alex and Yang, Zhuoran and Wang, Zhaoran and Precup, Doina and Yang, Lin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4607--4616},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ishfaq21a/ishfaq21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ishfaq21a.html},\n abstract = \t {We propose a model-free reinforcement learning algorithm inspired by the popular randomized least squares value iteration (RLSVI) algorithm as well as the optimism principle. Unlike existing upper-confidence-bound (UCB) based approaches, which are often computationally intractable, our algorithm drives exploration by simply perturbing the training data with judiciously chosen i.i.d. scalar noises. To attain optimistic value function estimation without resorting to a UCB-style bonus, we introduce an optimistic reward sampling procedure. When the value functions can be represented by a function class $\\mathcal{F}$, our algorithm achieves a worst-case regret bound of $\\tilde{O}(\\mathrm{poly}(d_EH)\\sqrt{T})$ where $T$ is the time elapsed, $H$ is the planning horizon and $d_E$ is the \\emph{eluder dimension} of $\\mathcal{F}$. In the linear setting, our algorithm reduces to LSVI-PHE, a variant of RLSVI, that enjoys an $\\tilde{\\mathcal{O}}(\\sqrt{d^3H^3T})$ regret. 
We complement the theory with an empirical evaluation across known difficult exploration tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/ishfaq21a/ishfaq21a.pdf", "supp": "", "pdf_size": 2996122, "gs_citation": 50, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3685750443721916834&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Mila+School of Computer Science, McGill University; School of Mathematical Science, Peking University; Mila+School of Computer Science, McGill University; Amii and Department of Computing Science, University of Alberta; Department of Operations Research and Financial Engineering, Princeton University; Industrial Engineering Management Sciences, Northwestern University; Mila+School of Computer Science, McGill University+DeepMind, Montreal; Department of Electrical and Computer Engineering, University of California, Los Angeles", "aff_domain": "mail.mcgill.ca; ; ; ; ; ; ; ", "email": "mail.mcgill.ca; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/ishfaq21a.html", "aff_unique_index": "0+1;2;0+1;3;4;5;0+1+6;7", "aff_unique_norm": "Mila;McGill University;Peking University;University of Alberta;Princeton University;Northwestern University;DeepMind;University of California, Los Angeles", "aff_unique_dep": "Quebec Artificial Intelligence Institute;School of Computer Science;School of Mathematical Science;Department of Computing Science;Department of Operations Research and Financial Engineering;Industrial Engineering Management Sciences;;Department of Electrical and Computer Engineering", "aff_unique_url": "https://mila.quebec;https://www.mcgill.ca;http://www.pku.edu.cn;https://www.ualberta.ca;https://www.princeton.edu;https://www.northwestern.edu;https://deepmind.com;https://www.ucla.edu", "aff_unique_abbr": "Mila;McGill;PKU;UAlberta;Princeton;NU;DeepMind;UCLA", "aff_campus_unique_index": "1;2;1;1+1;3", "aff_campus_unique": ";Montreal;Beijing;Los Angeles", "aff_country_unique_index": "0+0;1;0+0;0;2;2;0+0+0;2", "aff_country_unique": "Canada;China;United States" }, { "title": "Rate-Distortion Analysis of Minimum Excess Risk in Bayesian Learning", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9217", "id": "9217", "proceeding": "http://proceedings.mlr.press/v139/hafez-kolahi21a.html", "slides": "", "author_site": "Hassan Hafez-Kolahi, Behrad Moniri, Shohreh Kasaei, Mahdieh Soleymani Baghshah", "author": "Hassan Hafez-Kolahi; Behrad Moniri; Shohreh Kasaei; Mahdieh Soleymani Baghshah", "abstract": "In parametric Bayesian learning, a prior is assumed on the parameter $W$ which determines the distribution of samples. In this setting, Minimum Excess Risk (MER) is defined as the difference between the minimum expected loss achievable when learning from data and the minimum expected loss that could be achieved if $W$ was observed. In this paper, we build upon and extend the recent results of (Xu & Raginsky, 2020) to analyze the MER in Bayesian learning and derive information-theoretic bounds on it. We formulate the problem as a (constrained) rate-distortion optimization and show how the solution can be bounded above and below by two other rate-distortion functions that are easier to study. The lower bound represents the minimum possible excess risk achievable by \\emph{any} process using $R$ bits of information from the parameter $W$. 
For the upper bound, the optimization is further constrained to use $R$ bits from the training set, a setting which relates MER to information-theoretic bounds on the generalization gap in frequentist learning. We derive information-theoretic bounds on the difference between these upper and lower bounds and show that they can provide order-wise tight rates for MER under certain conditions. This analysis gives more insight into the information-theoretic nature of Bayesian learning as well as providing novel bounds.", "bibtex": "@InProceedings{pmlr-v139-hafez-kolahi21a,\n title = \t {Rate-Distortion Analysis of Minimum Excess Risk in Bayesian Learning},\n author = {Hafez-Kolahi, Hassan and Moniri, Behrad and Kasaei, Shohreh and Baghshah, Mahdieh Soleymani},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3998--4007},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hafez-kolahi21a/hafez-kolahi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hafez-kolahi21a.html},\n abstract = \t {In parametric Bayesian learning, a prior is assumed on the parameter $W$ which determines the distribution of samples. In this setting, Minimum Excess Risk (MER) is defined as the difference between the minimum expected loss achievable when learning from data and the minimum expected loss that could be achieved if $W$ was observed. In this paper, we build upon and extend the recent results of (Xu & Raginsky, 2020) to analyze the MER in Bayesian learning and derive information-theoretic bounds on it. We formulate the problem as a (constrained) rate-distortion optimization and show how the solution can be bounded above and below by two other rate-distortion functions that are easier to study. The lower bound represents the minimum possible excess risk achievable by \\emph{any} process using $R$ bits of information from the parameter $W$. For the upper bound, the optimization is further constrained to use $R$ bits from the training set, a setting which relates MER to information-theoretic bounds on the generalization gap in frequentist learning. We derive information-theoretic bounds on the difference between these upper and lower bounds and show that they can provide order-wise tight rates for MER under certain conditions. 
This analysis gives more insight into the information-theoretic nature of Bayesian learning as well as providing novel bounds.}\n}", "pdf": "http://proceedings.mlr.press/v139/hafez-kolahi21a/hafez-kolahi21a.pdf", "supp": "", "pdf_size": 377600, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7163211365017967193&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Computer Engineering, Sharif University of Technology, Tehran, Iran; Department of Electrical Engineering, Sharif University of Technology, Tehran, Iran; Department of Computer Engineering, Sharif University of Technology, Tehran, Iran; Department of Computer Engineering, Sharif University of Technology, Tehran, Iran", "aff_domain": "sharif.edu;sharif.edu;sharif.edu;sharif.edu", "email": "sharif.edu;sharif.edu;sharif.edu;sharif.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/hafez-kolahi21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Sharif University of Technology", "aff_unique_dep": "Department of Computer Engineering", "aff_unique_url": "https://www.sharif.edu", "aff_unique_abbr": "Sharif UT", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Tehran", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Iran" }, { "title": "Re-understanding Finite-State Representations of Recurrent Policy Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8627", "id": "8627", "proceeding": "http://proceedings.mlr.press/v139/danesh21a.html", "slides": "/media/icml-2021/Slides/8627.pdf", "author_site": "Mohamad H Danesh, Anurag Koul, Alan Fern, Saeed Khorram", "author": "Mohamad H Danesh; Anurag Koul; Alan Fern; Saeed Khorram", "abstract": "We introduce an approach for understanding control policies represented as recurrent neural networks. Recent work has approached this problem by transforming such recurrent policy networks into finite-state machines (FSM) and then analyzing the equivalent minimized FSM. While this led to interesting insights, the minimization process can obscure a deeper understanding of a machine\u2019s operation by merging states that are semantically distinct. To address this issue, we introduce an analysis approach that starts with an unminimized FSM and applies more-interpretable reductions that preserve the key decision points of the policy. We also contribute an attention tool to attain a deeper understanding of the role of observations in the decisions. Our case studies on 7 Atari games and 3 control benchmarks demonstrate that the approach can reveal insights that have not been previously noticed.", "bibtex": "@InProceedings{pmlr-v139-danesh21a,\n title = \t {Re-understanding Finite-State Representations of Recurrent Policy Networks},\n author = {Danesh, Mohamad H and Koul, Anurag and Fern, Alan and Khorram, Saeed},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2388--2397},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/danesh21a/danesh21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/danesh21a.html},\n abstract = \t {We introduce an approach for understanding control policies represented as recurrent neural networks. 
Recent work has approached this problem by transforming such recurrent policy networks into finite-state machines (FSM) and then analyzing the equivalent minimized FSM. While this led to interesting insights, the minimization process can obscure a deeper understanding of a machine\u2019s operation by merging states that are semantically distinct. To address this issue, we introduce an analysis approach that starts with an unminimized FSM and applies more-interpretable reductions that preserve the key decision points of the policy. We also contribute an attention tool to attain a deeper understanding of the role of observations in the decisions. Our case studies on 7 Atari games and 3 control benchmarks demonstrate that the approach can reveal insights that have not been previously noticed.}\n}", "pdf": "http://proceedings.mlr.press/v139/danesh21a/danesh21a.pdf", "supp": "", "pdf_size": 1763579, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2835459084556077542&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 10, "aff": "Department of EECS, Oregon State University, Corvallis, OR, USA; Department of EECS, Oregon State University, Corvallis, OR, USA; Department of EECS, Oregon State University, Corvallis, OR, USA; Department of EECS, Oregon State University, Corvallis, OR, USA", "aff_domain": "oregonstate.edu; ; ; ", "email": "oregonstate.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/danesh21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Oregon State University", "aff_unique_dep": "Department of EECS", "aff_unique_url": "https://oregonstate.edu", "aff_unique_abbr": "OSU", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Corvallis", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Reasoning Over Virtual Knowledge Bases With Open Predicate Relations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9943", "id": "9943", "proceeding": "http://proceedings.mlr.press/v139/sun21e.html", "slides": "", "author_site": "Haitian Sun, Patrick Verga, Bhuwan Dhingra, Ruslan Salakhutdinov, William Cohen", "author": "Haitian Sun; Patrick Verga; Bhuwan Dhingra; Ruslan Salakhutdinov; William W Cohen", "abstract": "We present the Open Predicate Query Language (OPQL); a method for constructing a virtual KB (VKB) trained entirely from text. Large Knowledge Bases (KBs) are indispensable for a wide-range of industry applications such as question answering and recommendation. Typically, KBs encode world knowledge in a structured, readily accessible form derived from laborious human annotation efforts. Unfortunately, while they are extremely high precision, KBs are inevitably highly incomplete and automated methods for enriching them are far too inaccurate. Instead, OPQL constructs a VKB by encoding and indexing a set of relation mentions in a way that naturally enables reasoning and can be trained without any structured supervision. 
We demonstrate that OPQL outperforms prior VKB methods on two different KB reasoning tasks and, additionally, can be used as an external memory integrated into a language model (OPQL-LM) leading to improvements on two open-domain question answering tasks.", "bibtex": "@InProceedings{pmlr-v139-sun21e,\n title = \t {Reasoning Over Virtual Knowledge Bases With Open Predicate Relations},\n author = {Sun, Haitian and Verga, Patrick and Dhingra, Bhuwan and Salakhutdinov, Ruslan and Cohen, William W},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9966--9977},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sun21e/sun21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/sun21e.html},\n abstract = \t {We present the Open Predicate Query Language (OPQL); a method for constructing a virtual KB (VKB) trained entirely from text. Large Knowledge Bases (KBs) are indispensable for a wide-range of industry applications such as question answering and recommendation. Typically, KBs encode world knowledge in a structured, readily accessible form derived from laborious human annotation efforts. Unfortunately, while they are extremely high precision, KBs are inevitably highly incomplete and automated methods for enriching them are far too inaccurate. Instead, OPQL constructs a VKB by encoding and indexing a set of relation mentions in a way that naturally enables reasoning and can be trained without any structured supervision. We demonstrate that OPQL outperforms prior VKB methods on two different KB reasoning tasks and, additionally, can be used as an external memory integrated into a language model (OPQL-LM) leading to improvements on two open-domain question answering tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/sun21e/sun21e.pdf", "supp": "", "pdf_size": 673734, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16424640886472330707&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Carnegie Mellon University; Google Research; Google Research; Carnegie Mellon University; Google Research", "aff_domain": "cs.cmu.edu;google.com;google.com;cs.cmu.edu;google.com", "email": "cs.cmu.edu;google.com;google.com;cs.cmu.edu;google.com", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/sun21e.html", "aff_unique_index": "0;1;1;0;1", "aff_unique_norm": "Carnegie Mellon University;Google", "aff_unique_dep": ";Google Research", "aff_unique_url": "https://www.cmu.edu;https://research.google", "aff_unique_abbr": "CMU;Google Research", "aff_campus_unique_index": "1;1;1", "aff_campus_unique": ";Mountain View", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Recomposing the Reinforcement Learning Building Blocks with Hypernetworks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8489", "id": "8489", "proceeding": "http://proceedings.mlr.press/v139/sarafian21a.html", "slides": "/media/icml-2021/Slides/8489.pdf", "author_site": "Elad Sarafian, Shai Keynan, Sarit Kraus", "author": "Elad Sarafian; Shai Keynan; Sarit Kraus", "abstract": "The Reinforcement Learning (RL) building blocks, i.e. $Q$-functions and policy networks, usually take elements from the cartesian product of two domains as input. 
In particular, the input of the $Q$-function is both the state and the action, and in multi-task problems (Meta-RL) the policy can take a state and a context. Standard architectures tend to ignore these variables\u2019 underlying interpretations and simply concatenate their features into a single vector. In this work, we argue that this choice may lead to poor gradient estimation in actor-critic algorithms and high variance learning steps in Meta-RL algorithms. To consider the interaction between the input variables, we suggest using a Hypernetwork architecture where a primary network determines the weights of a conditional dynamic network. We show that this approach improves the gradient approximation and reduces the learning step variance, which both accelerates learning and improves the final performance. We demonstrate a consistent improvement across different locomotion tasks and different algorithms both in RL (TD3 and SAC) and in Meta-RL (MAML and PEARL).", "bibtex": "@InProceedings{pmlr-v139-sarafian21a,\n title = \t {Recomposing the Reinforcement Learning Building Blocks with Hypernetworks},\n author = {Sarafian, Elad and Keynan, Shai and Kraus, Sarit},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9301--9312},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sarafian21a/sarafian21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/sarafian21a.html},\n abstract = \t {The Reinforcement Learning (RL) building blocks, i.e. $Q$-functions and policy networks, usually take elements from the cartesian product of two domains as input. In particular, the input of the $Q$-function is both the state and the action, and in multi-task problems (Meta-RL) the policy can take a state and a context. Standard architectures tend to ignore these variables\u2019 underlying interpretations and simply concatenate their features into a single vector. In this work, we argue that this choice may lead to poor gradient estimation in actor-critic algorithms and high variance learning steps in Meta-RL algorithms. To consider the interaction between the input variables, we suggest using a Hypernetwork architecture where a primary network determines the weights of a conditional dynamic network. We show that this approach improves the gradient approximation and reduces the learning step variance, which both accelerates learning and improves the final performance. 
We demonstrate a consistent improvement across different locomotion tasks and different algorithms both in RL (TD3 and SAC) and in Meta-RL (MAML and PEARL).}\n}", "pdf": "http://proceedings.mlr.press/v139/sarafian21a/sarafian21a.pdf", "supp": "", "pdf_size": 1224164, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11431615300192492432&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, Bar-Ilan University, Ramat-Gan, Israel; Department of Computer Science, Bar-Ilan University, Ramat-Gan, Israel; Department of Computer Science, Bar-Ilan University, Ramat-Gan, Israel", "aff_domain": "gmail.com;gmail.com; ", "email": "gmail.com;gmail.com; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/sarafian21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Bar-Ilan University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.biu.ac.il", "aff_unique_abbr": "BIU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Ramat-Gan", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Israel" }, { "title": "Recovering AES Keys with a Deep Cold Boot Attack", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9771", "id": "9771", "proceeding": "http://proceedings.mlr.press/v139/zimerman21a.html", "slides": "/media/icml-2021/Slides/9771.pdf", "author_site": "Itamar Zimerman, Eliya Nachmani, Lior Wolf", "author": "Itamar Zimerman; Eliya Nachmani; Lior Wolf", "abstract": "Cold boot attacks inspect the corrupted random access memory soon after the power has been shut down. While most of the bits have been corrupted, many bits, at random locations, have not. Since the keys in many encryption schemes are being expanded in memory into longer keys with fixed redundancies, the keys can often be restored. In this work we combine a deep error correcting code technique together with a modified SAT solver scheme in order to apply the attack to AES keys. Even though AES consists Rijndael SBOX elements, that are specifically designed to be resistant to linear and differential cryptanalysis, our method provides a novel formalization of the AES key scheduling as a computational graph, which is implemented by neural message passing network. Our results show that our methods outperform the state of the art attack methods by a very large gap.", "bibtex": "@InProceedings{pmlr-v139-zimerman21a,\n title = \t {Recovering AES Keys with a Deep Cold Boot Attack},\n author = {Zimerman, Itamar and Nachmani, Eliya and Wolf, Lior},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12955--12966},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zimerman21a/zimerman21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zimerman21a.html},\n abstract = \t {Cold boot attacks inspect the corrupted random access memory soon after the power has been shut down. While most of the bits have been corrupted, many bits, at random locations, have not. Since the keys in many encryption schemes are being expanded in memory into longer keys with fixed redundancies, the keys can often be restored. 
In this work we combine a deep error correcting code technique together with a modified SAT solver scheme in order to apply the attack to AES keys. Even though AES consists Rijndael SBOX elements, that are specifically designed to be resistant to linear and differential cryptanalysis, our method provides a novel formalization of the AES key scheduling as a computational graph, which is implemented by neural message passing network. Our results show that our methods outperform the state of the art attack methods by a very large gap.}\n}", "pdf": "http://proceedings.mlr.press/v139/zimerman21a/zimerman21a.pdf", "supp": "", "pdf_size": 1064751, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18445002124349140394&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Tel Aviv University; Tel Aviv University + Facebook AI Research; Tel Aviv University", "aff_domain": "mail.tau.ac.il;gmail.com;gmail.com", "email": "mail.tau.ac.il;gmail.com;gmail.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zimerman21a.html", "aff_unique_index": "0;0+1;0", "aff_unique_norm": "Tel Aviv University;Meta", "aff_unique_dep": ";Facebook AI Research", "aff_unique_url": "https://www.tau.ac.il;https://research.facebook.com", "aff_unique_abbr": "TAU;FAIR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0+1;0", "aff_country_unique": "Israel;United States" }, { "title": "Regret Minimization in Stochastic Non-Convex Learning via a Proximal-Gradient Approach", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9651", "id": "9651", "proceeding": "http://proceedings.mlr.press/v139/hallak21a.html", "slides": "", "author_site": "Nadav Hallak, Panayotis Mertikopoulos, Volkan Cevher", "author": "Nadav Hallak; Panayotis Mertikopoulos; Volkan Cevher", "abstract": "This paper develops a methodology for regret minimization with stochastic first-order oracle feedback in online, constrained, non-smooth, non-convex problems. In this setting, the minimization of external regret is beyond reach for first-order methods, and there are no gradient-based algorithmic frameworks capable of providing a solution. On that account, we propose a conceptual approach that leverages non-convex optimality measures, leading to a suitable generalization of the learner\u2019s local regret. We focus on a local regret measure defined via a proximal-gradient mapping, that also encompasses the original notion proposed by Hazan et al. (2017). To achieve no local regret in this setting, we develop a proximal-gradient method based on stochastic first-order feedback, and a simpler method for when access to a perfect first-order oracle is possible. Both methods are order-optimal (in the min-max sense), and we also establish a bound on the number of proximal-gradient queries these methods require. 
As an important application of our results, we also obtain a link between online and offline non-convex stochastic optimization manifested as a new proximal-gradient scheme with complexity guarantees matching those obtained via variance reduction techniques.", "bibtex": "@InProceedings{pmlr-v139-hallak21a,\n title = \t {Regret Minimization in Stochastic Non-Convex Learning via a Proximal-Gradient Approach},\n author = {Hallak, Nadav and Mertikopoulos, Panayotis and Cevher, Volkan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4008--4017},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hallak21a/hallak21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hallak21a.html},\n abstract = \t {This paper develops a methodology for regret minimization with stochastic first-order oracle feedback in online, constrained, non-smooth, non-convex problems. In this setting, the minimization of external regret is beyond reach for first-order methods, and there are no gradient-based algorithmic frameworks capable of providing a solution. On that account, we propose a conceptual approach that leverages non-convex optimality measures, leading to a suitable generalization of the learner\u2019s local regret. We focus on a local regret measure defined via a proximal-gradient mapping, that also encompasses the original notion proposed by Hazan et al. (2017). To achieve no local regret in this setting, we develop a proximal-gradient method based on stochastic first-order feedback, and a simpler method for when access to a perfect first-order oracle is possible. Both methods are order-optimal (in the min-max sense), and we also establish a bound on the number of proximal-gradient queries these methods require. As an important application of our results, we also obtain a link between online and offline non-convex stochastic optimization manifested as a new proximal-gradient scheme with complexity guarantees matching those obtained via variance reduction techniques.}\n}", "pdf": "http://proceedings.mlr.press/v139/hallak21a/hallak21a.pdf", "supp": "", "pdf_size": 370630, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9720715888742129795&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Faculty of Industrial Engineering and Management, The Technion, Haifa, Israel; Univ. 
Grenoble Alpes, CNRS, Inria, LIG, Grenoble, France, & Criteo AI Lab; \u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne (EPFL)", "aff_domain": "technion.ac.il; ; ", "email": "technion.ac.il; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/hallak21a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Technion;University Grenoble Alpes;EPFL", "aff_unique_dep": "Faculty of Industrial Engineering and Management;;", "aff_unique_url": "https://technion.ac.il;https://www.univ-grenoble-alpes.fr;https://www.epfl.ch", "aff_unique_abbr": "Technion;UGA;EPFL", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Haifa;Grenoble;", "aff_country_unique_index": "0;1;2", "aff_country_unique": "Israel;France;Switzerland" }, { "title": "Regret and Cumulative Constraint Violation Analysis for Online Convex Optimization with Long Term Constraints", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9489", "id": "9489", "proceeding": "http://proceedings.mlr.press/v139/yi21b.html", "slides": "/media/icml-2021/Slides/9489.pdf", "author_site": "Xinlei Yi, Xiuxian Li, Tao Yang, Lihua Xie, Tianyou Chai, Karl Johansson", "author": "Xinlei Yi; Xiuxian Li; Tao Yang; Lihua Xie; Tianyou Chai; Karl Johansson", "abstract": "This paper considers online convex optimization with long term constraints, where constraints can be violated in intermediate rounds, but need to be satisfied in the long run. The cumulative constraint violation is used as the metric to measure constraint violations, which excludes the situation that strictly feasible constraints can compensate the effects of violated constraints. A novel algorithm is first proposed and it achieves an $\\mathcal{O}(T^{\\max\\{c,1-c\\}})$ bound for static regret and an $\\mathcal{O}(T^{(1-c)/2})$ bound for cumulative constraint violation, where $c\\in(0,1)$ is a user-defined trade-off parameter, and thus has improved performance compared with existing results. Both static regret and cumulative constraint violation bounds are reduced to $\\mathcal{O}(\\log(T))$ when the loss functions are strongly convex, which also improves existing results. In order to achieve the optimal regret with respect to any comparator sequence, another algorithm is then proposed and it achieves the optimal $\\mathcal{O}(\\sqrt{T(1+P_T)})$ regret and an $\\mathcal{O}(\\sqrt{T})$ cumulative constraint violation, where $P_T$ is the path-length of the comparator sequence.
Finally, numerical simulations are provided to illustrate the effectiveness of the theoretical results.", "bibtex": "@InProceedings{pmlr-v139-yi21b,\n title = \t {Regret and Cumulative Constraint Violation Analysis for Online Convex Optimization with Long Term Constraints},\n author = {Yi, Xinlei and Li, Xiuxian and Yang, Tao and Xie, Lihua and Chai, Tianyou and Johansson, Karl},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11998--12008},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yi21b/yi21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/yi21b.html},\n abstract = \t {This paper considers online convex optimization with long term constraints, where constraints can be violated in intermediate rounds, but need to be satisfied in the long run. The cumulative constraint violation is used as the metric to measure constraint violations, which excludes the situation that strictly feasible constraints can compensate the effects of violated constraints. A novel algorithm is first proposed and it achieves an $\\mathcal{O}(T^{\\max\\{c,1-c\\}})$ bound for static regret and an $\\mathcal{O}(T^{(1-c)/2})$ bound for cumulative constraint violation, where $c\\in(0,1)$ is a user-defined trade-off parameter, and thus has improved performance compared with existing results. Both static regret and cumulative constraint violation bounds are reduced to $\\mathcal{O}(\\log(T))$ when the loss functions are strongly convex, which also improves existing results. In order to achieve the optimal regret with respect to any comparator sequence, another algorithm is then proposed and it achieves the optimal $\\mathcal{O}(\\sqrt{T(1+P_T)})$ regret and an $\\mathcal{O}(\\sqrt{T})$ cumulative constraint violation, where $P_T$ is the path-length of the comparator sequence.
Finally, numerical simulations are provided to illustrate the effectiveness of the theoretical results.}\n}", "pdf": "http://proceedings.mlr.press/v139/yi21b/yi21b.pdf", "supp": "", "pdf_size": 981423, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4490116160571963049&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "School of Electrical Engineering and Computer Science, and Digital Futures, KTH Royal Institute of Technology, Stockholm, Sweden; Department of Control Science and Engineering, College of Electronics and Information Engineering, Institute for Advanced Study, and Shanghai Research Institute for Intelligent Autonomous Systems, Tongji University, Shanghai, China; State Key Laboratory of Synthetical Automation for Process Industries, Northeastern University, Shenyang, China; School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore; State Key Laboratory of Synthetical Automation for Process Industries, Northeastern University, Shenyang, China; School of Electrical Engineering and Computer Science, and Digital Futures, KTH Royal Institute of Technology, Stockholm, Sweden", "aff_domain": "kth.se;tongji.edu.cn;mail.neu.edu.cn;ntu.edu.sg;neu.edu.cn;kth.se", "email": "kth.se;tongji.edu.cn;mail.neu.edu.cn;ntu.edu.sg;neu.edu.cn;kth.se", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/yi21b.html", "aff_unique_index": "0;1;2;3;2;0", "aff_unique_norm": "KTH Royal Institute of Technology;Tongji University;Northeastern University;Nanyang Technological University", "aff_unique_dep": "School of Electrical Engineering and Computer Science;Department of Control Science and Engineering;State Key Laboratory of Synthetical Automation for Process Industries;School of Electrical and Electronic Engineering", "aff_unique_url": "https://www.kth.se;https://www.tongji.edu.cn;http://www.neu.edu.cn/;https://www.ntu.edu.sg", "aff_unique_abbr": "KTH;Tongji;NEU;NTU", "aff_campus_unique_index": "0;1;2;3;2;0", "aff_campus_unique": "Stockholm;Shanghai;Shenyang;Singapore", "aff_country_unique_index": "0;1;1;2;1;0", "aff_country_unique": "Sweden;China;Singapore" }, { "title": "Regularized Online Allocation Problems: Fairness and Beyond", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8455", "id": "8455", "proceeding": "http://proceedings.mlr.press/v139/balseiro21a.html", "slides": "/media/icml-2021/Slides/8455.pdf", "author_site": "Santiago Balseiro, Haihao Lu, Vahab Mirrokni", "author": "Santiago Balseiro; Haihao Lu; Vahab Mirrokni", "abstract": "Online allocation problems with resource constraints have a rich history in computer science and operations research. In this paper, we introduce the regularized online allocation problem, a variant that includes a non-linear regularizer acting on the total resource consumption. In this problem, requests repeatedly arrive over time and, for each request, a decision maker needs to take an action that generates a reward and consumes resources. The objective is to simultaneously maximize total rewards and the value of the regularizer subject to the resource constraints. Our primary motivation is the online allocation of internet advertisements wherein firms seek to maximize additive objectives such as the revenue or efficiency of the allocation. 
By introducing a regularizer, firms can account for the fairness of the allocation or, alternatively, punish under-delivery of advertisements\u2014two common desiderata in internet advertising markets. We design an algorithm when arrivals are drawn independently from a distribution that is unknown to the decision maker. Our algorithm is simple, fast, and attains the optimal order of sub-linear regret compared to the optimal allocation with the benefit of hindsight. Numerical experiments confirm the effectiveness of the proposed algorithm and of the regularizers in an internet advertising application.", "bibtex": "@InProceedings{pmlr-v139-balseiro21a,\n title = \t {Regularized Online Allocation Problems: Fairness and Beyond},\n author = {Balseiro, Santiago and Lu, Haihao and Mirrokni, Vahab},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {630--639},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/balseiro21a/balseiro21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/balseiro21a.html},\n abstract = \t {Online allocation problems with resource constraints have a rich history in computer science and operations research. In this paper, we introduce the regularized online allocation problem, a variant that includes a non-linear regularizer acting on the total resource consumption. In this problem, requests repeatedly arrive over time and, for each request, a decision maker needs to take an action that generates a reward and consumes resources. The objective is to simultaneously maximize total rewards and the value of the regularizer subject to the resource constraints. Our primary motivation is the online allocation of internet advertisements wherein firms seek to maximize additive objectives such as the revenue or efficiency of the allocation. By introducing a regularizer, firms can account for the fairness of the allocation or, alternatively, punish under-delivery of advertisements\u2014two common desiderata in internet advertising markets. We design an algorithm when arrivals are drawn independently from a distribution that is unknown to the decision maker. Our algorithm is simple, fast, and attains the optimal order of sub-linear regret compared to the optimal allocation with the benefit of hindsight. 
Numerical experiments confirm the effectiveness of the proposed algorithm and of the regularizers in an internet advertising application.}\n}", "pdf": "http://proceedings.mlr.press/v139/balseiro21a/balseiro21a.pdf", "supp": "", "pdf_size": 727148, "gs_citation": 64, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17744133517310457487&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Columbia University, New York, USA+Google Research, New York, USA; Google Research, New York, USA+University of Chicago; Google Research, New York, USA", "aff_domain": "columbia.edu;chicagobooth.edu; ", "email": "columbia.edu;chicagobooth.edu; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/balseiro21a.html", "aff_unique_index": "0+1;1+2;1", "aff_unique_norm": "Columbia University;Google;University of Chicago", "aff_unique_dep": ";Google Research;", "aff_unique_url": "https://www.columbia.edu;https://research.google;https://www.uchicago.edu", "aff_unique_abbr": "Columbia;Google Research;UChicago", "aff_campus_unique_index": "0+0;0;0", "aff_campus_unique": "New York;", "aff_country_unique_index": "0+0;0+0;0", "aff_country_unique": "United States" }, { "title": "Regularized Submodular Maximization at Scale", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8669", "id": "8669", "proceeding": "http://proceedings.mlr.press/v139/kazemi21a.html", "slides": "/media/icml-2021/Slides/8669.pdf", "author_site": "Ehsan Kazemi, shervin minaee, Moran Feldman, Amin Karbasi", "author": "Ehsan Kazemi; Shervin Minaee; Moran Feldman; Amin Karbasi", "abstract": "In this paper, we propose scalable methods for maximizing a regularized submodular function $f \\triangleq g-\\ell$ expressed as the difference between a monotone submodular function $g$ and a modular function $\\ell$. Submodularity is inherently related to the notions of diversity, coverage, and representativeness. In particular, finding the mode (i.e., the most likely configuration) of many popular probabilistic models of diversity, such as determinantal point processes and strongly log-concave distributions, involves maximization of (regularized) submodular functions. Since a regularized function $f$ can potentially take on negative values, the classic theory of submodular maximization, which heavily relies on the non-negativity assumption of submodular functions, is not applicable. To circumvent this challenge, we develop the first one-pass streaming algorithm for maximizing a regularized submodular function subject to a $k$-cardinality constraint. Furthermore, we develop the first distributed algorithm that returns a solution $S$ in $O(1/ \\epsilon)$ rounds of MapReduce computation. We highlight that our result, even for the unregularized case where the modular term $\\ell$ is zero, improves the memory and communication complexity of the state-of-the-art by a factor of $O(1/ \\epsilon)$ while arguably provides a simpler distributed algorithm and a unifying analysis. 
We empirically study the performance of our scalable methods on a set of real-life applications, including finding the mode of negatively correlated distributions, vertex cover of social networks, and several data summarization tasks.", "bibtex": "@InProceedings{pmlr-v139-kazemi21a,\n title = \t {Regularized Submodular Maximization at Scale},\n author = {Kazemi, Ehsan and Minaee, Shervin and Feldman, Moran and Karbasi, Amin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5356--5366},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kazemi21a/kazemi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kazemi21a.html},\n abstract = \t {In this paper, we propose scalable methods for maximizing a regularized submodular function $f \\triangleq g-\\ell$ expressed as the difference between a monotone submodular function $g$ and a modular function $\\ell$. Submodularity is inherently related to the notions of diversity, coverage, and representativeness. In particular, finding the mode (i.e., the most likely configuration) of many popular probabilistic models of diversity, such as determinantal point processes and strongly log-concave distributions, involves maximization of (regularized) submodular functions. Since a regularized function $f$ can potentially take on negative values, the classic theory of submodular maximization, which heavily relies on the non-negativity assumption of submodular functions, is not applicable. To circumvent this challenge, we develop the first one-pass streaming algorithm for maximizing a regularized submodular function subject to a $k$-cardinality constraint. Furthermore, we develop the first distributed algorithm that returns a solution $S$ in $O(1/ \\epsilon)$ rounds of MapReduce computation. We highlight that our result, even for the unregularized case where the modular term $\\ell$ is zero, improves the memory and communication complexity of the state-of-the-art by a factor of $O(1/ \\epsilon)$ while arguably provides a simpler distributed algorithm and a unifying analysis. 
We empirically study the performance of our scalable methods on a set of real-life applications, including finding the mode of negatively correlated distributions, vertex cover of social networks, and several data summarization tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/kazemi21a/kazemi21a.pdf", "supp": "", "pdf_size": 1414587, "gs_citation": 51, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4124217008644621509&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Google, Z\u00fcrich, Switzerland; Snap Inc; Department of Computer Science, University of Haifa, Israel; Yale Institute for Network Science, Yale University", "aff_domain": "google.com; ; ; ", "email": "google.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/kazemi21a.html", "aff_unique_index": "0;1;2;3", "aff_unique_norm": "Google;Snap Inc;University of Haifa;Yale University", "aff_unique_dep": "Google;;Department of Computer Science;Yale Institute for Network Science", "aff_unique_url": "https://www.google.ch;https://www.snapinc.com;https://www.haifa.ac.il;https://www.yale.edu", "aff_unique_abbr": "Google;Snap;UoH;Yale", "aff_campus_unique_index": "0;2", "aff_campus_unique": "Z\u00fcrich;;New Haven", "aff_country_unique_index": "0;1;2;1", "aff_country_unique": "Switzerland;United States;Israel" }, { "title": "Regularizing towards Causal Invariance: Linear Models with Proxies", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9911", "id": "9911", "proceeding": "http://proceedings.mlr.press/v139/oberst21a.html", "slides": "/media/icml-2021/Slides/9911.pdf", "author_site": "Michael Oberst, Nikolaj Thams, Jonas Peters, David Sontag", "author": "Michael Oberst; Nikolaj Thams; Jonas Peters; David Sontag", "abstract": "We propose a method for learning linear models whose predictive performance is robust to causal interventions on unobserved variables, when noisy proxies of those variables are available. Our approach takes the form of a regularization term that trades off between in-distribution performance and robustness to interventions. Under the assumption of a linear structural causal model, we show that a single proxy can be used to create estimators that are prediction optimal under interventions of bounded strength. This strength depends on the magnitude of the measurement noise in the proxy, which is, in general, not identifiable. In the case of two proxy variables, we propose a modified estimator that is prediction optimal under interventions up to a known strength. We further show how to extend these estimators to scenarios where additional information about the \"test time\" intervention is available during training. 
We evaluate our theoretical findings in synthetic experiments and using real data of hourly pollution levels across several cities in China.", "bibtex": "@InProceedings{pmlr-v139-oberst21a,\n title = \t {Regularizing towards Causal Invariance: Linear Models with Proxies},\n author = {Oberst, Michael and Thams, Nikolaj and Peters, Jonas and Sontag, David},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8260--8270},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/oberst21a/oberst21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/oberst21a.html},\n abstract = \t {We propose a method for learning linear models whose predictive performance is robust to causal interventions on unobserved variables, when noisy proxies of those variables are available. Our approach takes the form of a regularization term that trades off between in-distribution performance and robustness to interventions. Under the assumption of a linear structural causal model, we show that a single proxy can be used to create estimators that are prediction optimal under interventions of bounded strength. This strength depends on the magnitude of the measurement noise in the proxy, which is, in general, not identifiable. In the case of two proxy variables, we propose a modified estimator that is prediction optimal under interventions up to a known strength. We further show how to extend these estimators to scenarios where additional information about the \"test time\" intervention is available during training. We evaluate our theoretical findings in synthetic experiments and using real data of hourly pollution levels across several cities in China.}\n}", "pdf": "http://proceedings.mlr.press/v139/oberst21a/oberst21a.pdf", "supp": "", "pdf_size": 1001196, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5547608297314715512&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "EECS, MIT, Cambridge, USA; Department of Mathematical Sciences, University of Copenhagen, Copenhagen, Denmark; Department of Mathematical Sciences, University of Copenhagen, Copenhagen, Denmark; EECS, MIT, Cambridge, USA", "aff_domain": "mit.edu; ; ; ", "email": "mit.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/oberst21a.html", "aff_unique_index": "0;1;1;0", "aff_unique_norm": "Massachusetts Institute of Technology;University of Copenhagen", "aff_unique_dep": "Electrical Engineering and Computer Science;Department of Mathematical Sciences", "aff_unique_url": "https://www.mit.edu;https://www.ku.dk", "aff_unique_abbr": "MIT;UCPH", "aff_campus_unique_index": "0;1;1;0", "aff_campus_unique": "Cambridge;Copenhagen", "aff_country_unique_index": "0;1;1;0", "aff_country_unique": "United States;Denmark" }, { "title": "Reinforcement Learning Under Moral Uncertainty", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9589", "id": "9589", "proceeding": "http://proceedings.mlr.press/v139/ecoffet21a.html", "slides": "/media/icml-2021/Slides/9589.pdf", "author_site": "Adrien Ecoffet, Joel Lehman", "author": "Adrien Ecoffet; Joel Lehman", "abstract": "An ambitious goal for machine learning is to create agents that behave ethically: The capacity to abide by human moral norms would greatly expand the 
context in which autonomous agents could be practically and safely deployed, e.g. fully autonomous vehicles will encounter charged moral decisions that complicate their deployment. While ethical agents could be trained by rewarding correct behavior under a specific moral theory (e.g. utilitarianism), there remains widespread disagreement about the nature of morality. Acknowledging such disagreement, recent work in moral philosophy proposes that ethical behavior requires acting under moral uncertainty, i.e. to take into account when acting that one\u2019s credence is split across several plausible ethical theories. This paper translates such insights to the field of reinforcement learning, proposes two training methods that realize different points among competing desiderata, and trains agents in simple environments to act under moral uncertainty. The results illustrate (1) how such uncertainty can help curb extreme behavior from commitment to single theories and (2) several technical complications arising from attempting to ground moral philosophy in RL (e.g. how can a principled trade-off between two competing but incomparable reward functions be reached). The aim is to catalyze progress towards morally-competent agents and highlight the potential of RL to contribute towards the computational grounding of moral philosophy.", "bibtex": "@InProceedings{pmlr-v139-ecoffet21a,\n title = \t {Reinforcement Learning Under Moral Uncertainty},\n author = {Ecoffet, Adrien and Lehman, Joel},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2926--2936},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ecoffet21a/ecoffet21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ecoffet21a.html},\n abstract = \t {An ambitious goal for machine learning is to create agents that behave ethically: The capacity to abide by human moral norms would greatly expand the context in which autonomous agents could be practically and safely deployed, e.g. fully autonomous vehicles will encounter charged moral decisions that complicate their deployment. While ethical agents could be trained by rewarding correct behavior under a specific moral theory (e.g. utilitarianism), there remains widespread disagreement about the nature of morality. Acknowledging such disagreement, recent work in moral philosophy proposes that ethical behavior requires acting under moral uncertainty, i.e. to take into account when acting that one\u2019s credence is split across several plausible ethical theories. This paper translates such insights to the field of reinforcement learning, proposes two training methods that realize different points among competing desiderata, and trains agents in simple environments to act under moral uncertainty. The results illustrate (1) how such uncertainty can help curb extreme behavior from commitment to single theories and (2) several technical complications arising from attempting to ground moral philosophy in RL (e.g. how can a principled trade-off between two competing but incomparable reward functions be reached). 
The aim is to catalyze progress towards morally-competent agents and highlight the potential of RL to contribute towards the computational grounding of moral philosophy.}\n}", "pdf": "http://proceedings.mlr.press/v139/ecoffet21a/ecoffet21a.pdf", "supp": "", "pdf_size": 370281, "gs_citation": 50, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2905901650161533369&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Uber AI Labs, San Francisco, CA, USA+OpenAI, San Francisco, CA, USA; Uber AI Labs, San Francisco, CA, USA+OpenAI, San Francisco, CA, USA", "aff_domain": "gmail.com; ", "email": "gmail.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/ecoffet21a.html", "aff_unique_index": "0+1;0+1", "aff_unique_norm": "Uber AI Labs;OpenAI", "aff_unique_dep": "AI Labs;", "aff_unique_url": "https://www.uber.com;https://openai.com", "aff_unique_abbr": "Uber AI Labs;OpenAI", "aff_campus_unique_index": "0+0;0+0", "aff_campus_unique": "San Francisco", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "United States" }, { "title": "Reinforcement Learning for Cost-Aware Markov Decision Processes", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10597", "id": "10597", "proceeding": "http://proceedings.mlr.press/v139/suttle21a.html", "slides": "", "author_site": "Wesley A Suttle, Kaiqing Zhang, Zhuoran Yang, Ji Liu, David N Kraemer", "author": "Wesley Suttle; Kaiqing Zhang; Zhuoran Yang; Ji Liu; David Kraemer", "abstract": "Ratio maximization has applications in areas as diverse as finance, reward shaping for reinforcement learning (RL), and the development of safe artificial intelligence, yet there has been very little exploration of RL algorithms for ratio maximization. This paper addresses this deficiency by introducing two new, model-free RL algorithms for solving cost-aware Markov decision processes, where the goal is to maximize the ratio of long-run average reward to long-run average cost. The first algorithm is a two-timescale scheme based on relative value iteration (RVI) Q-learning and the second is an actor-critic scheme. The paper proves almost sure convergence of the former to the globally optimal solution in the tabular case and almost sure convergence of the latter under linear function approximation for the critic. Unlike previous methods, the two algorithms provably converge for general reward and cost functions under suitable conditions. 
The paper also provides empirical results demonstrating promising performance and lending strong support to the theoretical results.", "bibtex": "@InProceedings{pmlr-v139-suttle21a,\n title = \t {Reinforcement Learning for Cost-Aware Markov Decision Processes},\n author = {Suttle, Wesley and Zhang, Kaiqing and Yang, Zhuoran and Liu, Ji and Kraemer, David},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9989--9999},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/suttle21a/suttle21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/suttle21a.html},\n abstract = \t {Ratio maximization has applications in areas as diverse as finance, reward shaping for reinforcement learning (RL), and the development of safe artificial intelligence, yet there has been very little exploration of RL algorithms for ratio maximization. This paper addresses this deficiency by introducing two new, model-free RL algorithms for solving cost-aware Markov decision processes, where the goal is to maximize the ratio of long-run average reward to long-run average cost. The first algorithm is a two-timescale scheme based on relative value iteration (RVI) Q-learning and the second is an actor-critic scheme. The paper proves almost sure convergence of the former to the globally optimal solution in the tabular case and almost sure convergence of the latter under linear function approximation for the critic. Unlike previous methods, the two algorithms provably converge for general reward and cost functions under suitable conditions. The paper also provides empirical results demonstrating promising performance and lending strong support to the theoretical results.}\n}", "pdf": "http://proceedings.mlr.press/v139/suttle21a/suttle21a.pdf", "supp": "", "pdf_size": 541248, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11157148690169606895&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": ";;;;", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/suttle21a.html" }, { "title": "Reinforcement Learning of Implicit and Explicit Control Flow Instructions", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9567", "id": "9567", "proceeding": "http://proceedings.mlr.press/v139/brooks21a.html", "slides": "", "author_site": "Ethan Brooks, Janarthanan Rajendran, Richard Lewis, Satinder Singh", "author": "Ethan Brooks; Janarthanan Rajendran; Richard L Lewis; Satinder Singh", "abstract": "Learning to flexibly follow task instructions in dynamic environments poses interesting challenges for reinforcement learning agents. We focus here on the problem of learning control flow that deviates from a strict step-by-step execution of instructions{\u2014}that is, control flow that may skip forward over parts of the instructions or return backward to previously completed or skipped steps. 
Demand for such flexible control arises in two fundamental ways: explicitly when control is specified in the instructions themselves (such as conditional branching and looping) and implicitly when stochastic environment dynamics require re-completion of instructions whose effects have been perturbed, or opportunistic skipping of instructions whose effects are already present. We formulate an attention-based architecture that meets these challenges by learning, from task reward only, to flexibly attend to and condition behavior on an internal encoding of the instructions. We test the architecture\u2019s ability to learn both explicit and implicit control in two illustrative domains\u2014one inspired by Minecraft and the other by StarCraft\u2014and show that the architecture exhibits zero-shot generalization to novel instructions of length greater than those in a training set, at a performance level unmatched by three baseline recurrent architectures and one ablation architecture.", "bibtex": "@InProceedings{pmlr-v139-brooks21a,\n title = \t {Reinforcement Learning of Implicit and Explicit Control Flow Instructions},\n author = {Brooks, Ethan and Rajendran, Janarthanan and Lewis, Richard L and Singh, Satinder},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1082--1091},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/brooks21a/brooks21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/brooks21a.html},\n abstract = \t {Learning to flexibly follow task instructions in dynamic environments poses interesting challenges for reinforcement learning agents. We focus here on the problem of learning control flow that deviates from a strict step-by-step execution of instructions{\u2014}that is, control flow that may skip forward over parts of the instructions or return backward to previously completed or skipped steps. Demand for such flexible control arises in two fundamental ways: explicitly when control is specified in the instructions themselves (such as conditional branching and looping) and implicitly when stochastic environment dynamics require re-completion of instructions whose effects have been perturbed, or opportunistic skipping of instructions whose effects are already present. We formulate an attention-based architecture that meets these challenges by learning, from task reward only, to flexibly attend to and condition behavior on an internal encoding of the instructions. 
We test the architecture\u2019s ability to learn both explicit and implicit control in two illustrative domains\u2014one inspired by Minecraft and the other by StarCraft\u2014and show that the architecture exhibits zero-shot generalization to novel instructions of length greater than those in a training set, at a performance level unmatched by three baseline recurrent architectures and one ablation architecture.}\n}", "pdf": "http://proceedings.mlr.press/v139/brooks21a/brooks21a.pdf", "supp": "", "pdf_size": 1622016, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16859306145242117745&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, University of Michigan; Department of Computer Science, University of Michigan; Weinberg Institute for Cognitive Science, Departments of Psychology and Linguistics, University of Michigan; Department of Computer Science, University of Michigan", "aff_domain": "umich.edu; ; ; ", "email": "umich.edu; ; ; ", "github": "https://github.com/ethanabrooks/CoFCA-S", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/brooks21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Michigan", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.umich.edu", "aff_unique_abbr": "UM", "aff_campus_unique_index": "1", "aff_campus_unique": ";Ann Arbor", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Reinforcement Learning with Prototypical Representations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10423", "id": "10423", "proceeding": "http://proceedings.mlr.press/v139/yarats21a.html", "slides": "/media/icml-2021/Slides/10423.pdf", "author_site": "Denis Yarats, Rob Fergus, Alessandro Lazaric, Lerrel Pinto", "author": "Denis Yarats; Rob Fergus; Alessandro Lazaric; Lerrel Pinto", "abstract": "Learning effective representations in image-based environments is crucial for sample efficient Reinforcement Learning (RL). Unfortunately, in RL, representation learning is confounded with the exploratory experience of the agent \u2013 learning a useful representation requires diverse data, while effective exploration is only possible with coherent representations. Furthermore, we would like to learn representations that not only generalize across tasks but also accelerate downstream exploration for efficient task-specific training. To address these challenges we propose Proto-RL, a self-supervised framework that ties representation learning with exploration through prototypical representations. These prototypes simultaneously serve as a summarization of the exploratory experience of an agent as well as a basis for representing observations. We pre-train these task-agnostic representations and prototypes on environments without downstream task information. 
This enables state-of-the-art downstream policy learning on a set of difficult continuous control tasks.", "bibtex": "@InProceedings{pmlr-v139-yarats21a,\n title = \t {Reinforcement Learning with Prototypical Representations},\n author = {Yarats, Denis and Fergus, Rob and Lazaric, Alessandro and Pinto, Lerrel},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11920--11931},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yarats21a/yarats21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yarats21a.html},\n abstract = \t {Learning effective representations in image-based environments is crucial for sample efficient Reinforcement Learning (RL). Unfortunately, in RL, representation learning is confounded with the exploratory experience of the agent \u2013 learning a useful representation requires diverse data, while effective exploration is only possible with coherent representations. Furthermore, we would like to learn representations that not only generalize across tasks but also accelerate downstream exploration for efficient task-specific training. To address these challenges we propose Proto-RL, a self-supervised framework that ties representation learning with exploration through prototypical representations. These prototypes simultaneously serve as a summarization of the exploratory experience of an agent as well as a basis for representing observations. We pre-train these task-agnostic representations and prototypes on environments without downstream task information. This enables state-of-the-art downstream policy learning on a set of difficult continuous control tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/yarats21a/yarats21a.pdf", "supp": "", "pdf_size": 2632720, "gs_citation": 279, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9757208871404683122&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "New York University+Facebook AI Research; New York University; Facebook AI Research; New York University", "aff_domain": "cs.nyu.edu; ; ; ", "email": "cs.nyu.edu; ; ; ", "github": "https://github.com/denisyarats/proto", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/yarats21a.html", "aff_unique_index": "0+1;0;1;0", "aff_unique_norm": "New York University;Meta", "aff_unique_dep": ";Facebook AI Research", "aff_unique_url": "https://www.nyu.edu;https://research.facebook.com", "aff_unique_abbr": "NYU;FAIR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0;0", "aff_country_unique": "United States" }, { "title": "Relative Deviation Margin Bounds", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10193", "id": "10193", "proceeding": "http://proceedings.mlr.press/v139/cortes21a.html", "slides": "", "author_site": "Corinna Cortes, Mehryar Mohri, Ananda Theertha Suresh", "author": "Corinna Cortes; Mehryar Mohri; Ananda Theertha Suresh", "abstract": "We present a series of new and more favorable margin-based learning guarantees that depend on the empirical margin loss of a predictor. We give two types of learning bounds, in terms of either the Rademacher complexity or the empirical $\\ell_\\infty$-covering number of the hypothesis set used, both distribution-dependent and valid for general families.
Furthermore, using our relative deviation margin bounds, we derive distribution-dependent generalization bounds for unbounded loss functions under the assumption of a finite moment. We also briefly highlight several applications of these bounds and discuss their connection with existing results.", "bibtex": "@InProceedings{pmlr-v139-cortes21a,\n title = \t {Relative Deviation Margin Bounds},\n author = {Cortes, Corinna and Mohri, Mehryar and Suresh, Ananda Theertha},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2122--2131},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cortes21a/cortes21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/cortes21a.html},\n abstract = \t {We present a series of new and more favorable margin-based learning guarantees that depend on the empirical margin loss of a predictor. We give two types of learning bounds, in terms of either the Rademacher complexity or the empirical $\\ell_\\infty$-covering number of the hypothesis set used, both distribution-dependent and valid for general families. Furthermore, using our relative deviation margin bounds, we derive distribution-dependent generalization bounds for unbounded loss functions under the assumption of a finite moment. We also briefly highlight several applications of these bounds and discuss their connection with existing results.}\n}", "pdf": "http://proceedings.mlr.press/v139/cortes21a/cortes21a.pdf", "supp": "", "pdf_size": 468581, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15514178682627097474&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Google Research, New York, NY; Courant Institute of Mathematical Sciences, New York, NY; Google Research, New York, NY", "aff_domain": "google.com;cs.nyu.edu;google.com", "email": "google.com;cs.nyu.edu;google.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/cortes21a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Google;Courant Institute of Mathematical Sciences", "aff_unique_dep": "Google Research;Mathematical Sciences", "aff_unique_url": "https://research.google;https://courant.nyu.edu", "aff_unique_abbr": "Google Research;Courant", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "New York", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Relative Positional Encoding for Transformers with Linear Complexity", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9021", "id": "9021", "proceeding": "http://proceedings.mlr.press/v139/liutkus21a.html", "slides": "", "author_site": "Antoine Liutkus, Ond\u0159ej C\u00edfka, Shih-Lun Wu, Umut Simsekli, Yi-Hsuan Yang, Ga\u00ebl RICHARD", "author": "Antoine Liutkus; Ond\u0159ej C\u0131\u0301fka; Shih-Lun Wu; Umut Simsekli; Yi-Hsuan Yang; Gael Richard", "abstract": "Recent advances in Transformer models allow for unprecedented sequence lengths, due to linear space and time complexity. In the meantime, relative positional encoding (RPE) was proposed as beneficial for classical Transformers and consists in exploiting lags instead of absolute positions for inference.
Still, RPE is not available for the recent linear-variants of the Transformer, because it requires the explicit computation of the attention matrix, which is precisely what is avoided by such methods. In this paper, we bridge this gap and present Stochastic Positional Encoding as a way to generate PE that can be used as a replacement to the classical additive (sinusoidal) PE and provably behaves like RPE. The main theoretical contribution is to make a connection between positional encoding and cross-covariance structures of correlated Gaussian processes. We illustrate the performance of our approach on the Long-Range Arena benchmark and on music generation.", "bibtex": "@InProceedings{pmlr-v139-liutkus21a,\n title = \t {Relative Positional Encoding for Transformers with Linear Complexity},\n author = {Liutkus, Antoine and C\\'{\\i}fka, Ond{\\v{r}}ej and Wu, Shih-Lun and Simsekli, Umut and Yang, Yi-Hsuan and Richard, Gael},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7067--7079},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liutkus21a/liutkus21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/liutkus21a.html},\n abstract = \t {Recent advances in Transformer models allow for unprecedented sequence lengths, due to linear space and time complexity. In the meantime, relative positional encoding (RPE) was proposed as beneficial for classical Transformers and consists in exploiting lags instead of absolute positions for inference. Still, RPE is not available for the recent linear-variants of the Transformer, because it requires the explicit computation of the attention matrix, which is precisely what is avoided by such methods. In this paper, we bridge this gap and present Stochastic Positional Encoding as a way to generate PE that can be used as a replacement to the classical additive (sinusoidal) PE and provably behaves like RPE. The main theoretical contribution is to make a connection between positional encoding and cross-covariance structures of correlated Gaussian processes. We illustrate the performance of our approach on the Long-Range Arena benchmark and on music generation.}\n}", "pdf": "http://proceedings.mlr.press/v139/liutkus21a/liutkus21a.pdf", "supp": "", "pdf_size": 1528353, "gs_citation": 58, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16520451235518396778&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": ";;;;;", "aff_domain": ";;;;;", "email": ";;;;;", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/liutkus21a.html" }, { "title": "Representation Matters: Assessing the Importance of Subgroup Allocations in Training Data", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10069", "id": "10069", "proceeding": "http://proceedings.mlr.press/v139/rolf21a.html", "slides": "", "author_site": "Esther Rolf, Theodora Worledge, Benjamin Recht, Michael Jordan", "author": "Esther Rolf; Theodora T Worledge; Benjamin Recht; Michael Jordan", "abstract": "Collecting more diverse and representative training data is often touted as a remedy for the disparate performance of machine learning predictors across subpopulations. 
However, a precise framework for understanding how dataset properties like diversity affect learning outcomes is largely lacking. By casting data collection as part of the learning process, we demonstrate that diverse representation in training data is key not only to increasing subgroup performances, but also to achieving population-level objectives. Our analysis and experiments describe how dataset compositions influence performance and provide constructive results for using trends in existing data, alongside domain knowledge, to help guide intentional, objective-aware dataset design", "bibtex": "@InProceedings{pmlr-v139-rolf21a,\n title = \t {Representation Matters: Assessing the Importance of Subgroup Allocations in Training Data},\n author = {Rolf, Esther and Worledge, Theodora T and Recht, Benjamin and Jordan, Michael},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9040--9051},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/rolf21a/rolf21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/rolf21a.html},\n abstract = \t {Collecting more diverse and representative training data is often touted as a remedy for the disparate performance of machine learning predictors across subpopulations. However, a precise framework for understanding how dataset properties like diversity affect learning outcomes is largely lacking. By casting data collection as part of the learning process, we demonstrate that diverse representation in training data is key not only to increasing subgroup performances, but also to achieving population-level objectives. 
Our analysis and experiments describe how dataset compositions influence performance and provide constructive results for using trends in existing data, alongside domain knowledge, to help guide intentional, objective-aware dataset design}\n}", "pdf": "http://proceedings.mlr.press/v139/rolf21a/rolf21a.pdf", "supp": "", "pdf_size": 623871, "gs_citation": 42, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9213574703320829677&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of EECS, University of California, Berkeley; Department of EECS, University of California, Berkeley; Department of EECS, University of California, Berkeley; Department of EECS, University of California, Berkeley + Department of Statistics, University of California, Berkeley", "aff_domain": "berkeley.edu; ; ; ", "email": "berkeley.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/rolf21a.html", "aff_unique_index": "0;0;0;0+0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "Department of Electrical Engineering and Computer Sciences", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0;0;0+0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0;0;0+0", "aff_country_unique": "United States" }, { "title": "Representation Matters: Offline Pretraining for Sequential Decision Making", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10365", "id": "10365", "proceeding": "http://proceedings.mlr.press/v139/yang21h.html", "slides": "/media/icml-2021/Slides/10365.pdf", "author_site": "Mengjiao Yang, Ofir Nachum", "author": "Mengjiao Yang; Ofir Nachum", "abstract": "The recent success of supervised learning methods on ever larger offline datasets has spurred interest in the reinforcement learning (RL) field to investigate whether the same paradigms can be translated to RL algorithms. This research area, known as offline RL, has largely focused on offline policy optimization, aiming to find a return-maximizing policy exclusively from offline data. In this paper, we consider a slightly different approach to incorporating offline data into sequential decision-making. We aim to answer the question, what unsupervised objectives applied to offline datasets are able to learn state representations which elevate performance on downstream tasks, whether those downstream tasks be online RL, imitation learning from expert demonstrations, or even offline policy optimization based on the same offline dataset? Through a variety of experiments utilizing standard offline RL datasets, we find that the use of pretraining with unsupervised learning objectives can dramatically improve the performance of policy learning algorithms that otherwise yield mediocre performance on their own. 
Extensive ablations further provide insights into what components of these unsupervised objectives {\u2013} e.g., reward prediction, continuous or discrete representations, pretraining or finetuning {\u2013} are most important and in which settings.", "bibtex": "@InProceedings{pmlr-v139-yang21h,\n title = \t {Representation Matters: Offline Pretraining for Sequential Decision Making},\n author = {Yang, Mengjiao and Nachum, Ofir},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11784--11794},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yang21h/yang21h.pdf},\n url = \t {https://proceedings.mlr.press/v139/yang21h.html},\n abstract = \t {The recent success of supervised learning methods on ever larger offline datasets has spurred interest in the reinforcement learning (RL) field to investigate whether the same paradigms can be translated to RL algorithms. This research area, known as offline RL, has largely focused on offline policy optimization, aiming to find a return-maximizing policy exclusively from offline data. In this paper, we consider a slightly different approach to incorporating offline data into sequential decision-making. We aim to answer the question, what unsupervised objectives applied to offline datasets are able to learn state representations which elevate performance on downstream tasks, whether those downstream tasks be online RL, imitation learning from expert demonstrations, or even offline policy optimization based on the same offline dataset? Through a variety of experiments utilizing standard offline RL datasets, we find that the use of pretraining with unsupervised learning objectives can dramatically improve the performance of policy learning algorithms that otherwise yield mediocre performance on their own. Extensive ablations further provide insights into what components of these unsupervised objectives {\u2013} e.g., reward prediction, continuous or discrete representations, pretraining or finetuning {\u2013} are most important and in which settings.}\n}", "pdf": "http://proceedings.mlr.press/v139/yang21h/yang21h.pdf", "supp": "", "pdf_size": 2230656, "gs_citation": 144, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14695975873120594878&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": ";", "aff_domain": ";", "email": ";", "github": "https://github.com/google-research/google-research/tree/master/rl_repr", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/yang21h.html" }, { "title": "Representation Subspace Distance for Domain Adaptation Regression", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8637", "id": "8637", "proceeding": "http://proceedings.mlr.press/v139/chen21u.html", "slides": "", "author_site": "Xinyang Chen, Sinan Wang, Jianmin Wang, Mingsheng Long", "author": "Xinyang Chen; Sinan Wang; Jianmin Wang; Mingsheng Long", "abstract": "Regression, as a counterpart to classification, is a major paradigm with a wide range of applications. Domain adaptation regression extends it by generalizing a regressor from a labeled source domain to an unlabeled target domain. Existing domain adaptation regression methods have achieved positive results limited only to the shallow regime. 
A question arises: Why is the effect of learning invariant representations less pronounced in the deep regime? A key finding of this paper is that classification is robust to feature scaling but regression is not, and aligning the distributions of deep representations will alter feature scale and impede domain adaptation regression. Based on this finding, we propose to close the domain gap through orthogonal bases of the representation spaces, which are free from feature scaling. Inspired by the Riemannian geometry of the Grassmann manifold, we define a geometrical distance over representation subspaces and learn deep transferable representations by minimizing it. To avoid breaking the geometrical properties of deep representations, we further introduce the bases mismatch penalization to match the ordering of orthogonal bases across representation subspaces. Our method is evaluated on three domain adaptation regression benchmarks, two of which are introduced in this paper. Our method outperforms the state-of-the-art methods significantly, forming early positive results in the deep regime.", "bibtex": "@InProceedings{pmlr-v139-chen21u,\n title = \t {Representation Subspace Distance for Domain Adaptation Regression},\n author = {Chen, Xinyang and Wang, Sinan and Wang, Jianmin and Long, Mingsheng},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1749--1759},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21u/chen21u.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21u.html},\n abstract = \t {Regression, as a counterpart to classification, is a major paradigm with a wide range of applications. Domain adaptation regression extends it by generalizing a regressor from a labeled source domain to an unlabeled target domain. Existing domain adaptation regression methods have achieved positive results limited only to the shallow regime. A question arises: Why is the effect of learning invariant representations less pronounced in the deep regime? A key finding of this paper is that classification is robust to feature scaling but regression is not, and aligning the distributions of deep representations will alter feature scale and impede domain adaptation regression. Based on this finding, we propose to close the domain gap through orthogonal bases of the representation spaces, which are free from feature scaling. Inspired by the Riemannian geometry of the Grassmann manifold, we define a geometrical distance over representation subspaces and learn deep transferable representations by minimizing it. To avoid breaking the geometrical properties of deep representations, we further introduce the bases mismatch penalization to match the ordering of orthogonal bases across representation subspaces. Our method is evaluated on three domain adaptation regression benchmarks, two of which are introduced in this paper.
Our method outperforms the state-of-the-art methods significantly, forming early positive results in the deep regime.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21u/chen21u.pdf", "supp": "", "pdf_size": 10137379, "gs_citation": 122, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10077953685344571140&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "School of Software, BNRist, Tsinghua University; School of Software, BNRist, Tsinghua University; School of Software, BNRist, Tsinghua University; School of Software, BNRist, Tsinghua University", "aff_domain": "gmail.com; ; ;tsinghua.edu.cn", "email": "gmail.com; ; ;tsinghua.edu.cn", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/chen21u.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Tsinghua University", "aff_unique_dep": "School of Software", "aff_unique_url": "https://www.tsinghua.edu.cn", "aff_unique_abbr": "THU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "China" }, { "title": "Representational aspects of depth and conditioning in normalizing flows", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9495", "id": "9495", "proceeding": "http://proceedings.mlr.press/v139/koehler21a.html", "slides": "", "author_site": "Frederic Koehler, Viraj Mehta, Andrej Risteski", "author": "Frederic Koehler; Viraj Mehta; Andrej Risteski", "abstract": "Normalizing flows are among the most popular paradigms in generative modeling, especially for images, primarily because we can efficiently evaluate the likelihood of a data point. This is desirable both for evaluating the fit of a model, and for ease of training, as maximizing the likelihood can be done by gradient descent. However, training normalizing flows comes with difficulties as well: models which produce good samples typically need to be extremely deep \u2013 which comes with accompanying vanishing/exploding gradient problems. A very related problem is that they are often poorly \\emph{conditioned}: since they are parametrized as invertible maps from $\\mathbb{R}^d \\to \\mathbb{R}^d$, and typical training data like images intuitively is lower-dimensional, the learned maps often have Jacobians that are close to being singular. In our paper, we tackle representational aspects around depth and conditioning of normalizing flows: both for general invertible architectures, and for a particular common architecture, affine couplings. We prove that $\\Theta(1)$ affine coupling layers suffice to exactly represent a permutation or $1 \\times 1$ convolution, as used in GLOW, showing that representationally the choice of partition is not a bottleneck for depth. We also show that shallow affine coupling networks are universal approximators in Wasserstein distance if ill-conditioning is allowed, and experimentally investigate related phenomena involving padding. 
Finally, we show a depth lower bound for general flow architectures with few neurons per layer and bounded Lipschitz constant.", "bibtex": "@InProceedings{pmlr-v139-koehler21a,\n title = \t {Representational aspects of depth and conditioning in normalizing flows},\n author = {Koehler, Frederic and Mehta, Viraj and Risteski, Andrej},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5628--5636},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/koehler21a/koehler21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/koehler21a.html},\n abstract = \t {Normalizing flows are among the most popular paradigms in generative modeling, especially for images, primarily because we can efficiently evaluate the likelihood of a data point. This is desirable both for evaluating the fit of a model, and for ease of training, as maximizing the likelihood can be done by gradient descent. However, training normalizing flows comes with difficulties as well: models which produce good samples typically need to be extremely deep \u2013 which comes with accompanying vanishing/exploding gradient problems. A very related problem is that they are often poorly \\emph{conditioned}: since they are parametrized as invertible maps from $\\mathbb{R}^d \\to \\mathbb{R}^d$, and typical training data like images intuitively is lower-dimensional, the learned maps often have Jacobians that are close to being singular. In our paper, we tackle representational aspects around depth and conditioning of normalizing flows: both for general invertible architectures, and for a particular common architecture, affine couplings. We prove that $\\Theta(1)$ affine coupling layers suffice to exactly represent a permutation or $1 \\times 1$ convolution, as used in GLOW, showing that representationally the choice of partition is not a bottleneck for depth. We also show that shallow affine coupling networks are universal approximators in Wasserstein distance if ill-conditioning is allowed, and experimentally investigate related phenomena involving padding. 
Finally, we show a depth lower bound for general flow architectures with few neurons per layer and bounded Lipschitz constant.}\n}", "pdf": "http://proceedings.mlr.press/v139/koehler21a/koehler21a.pdf", "supp": "", "pdf_size": 577186, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6663664440844516693&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Mathematics, Massachusetts Institute of Technology, Cambridge, MA, USA; Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, USA; Machine Learning Department, Carnegie Mellon University, Pittsburgh, PA, USA", "aff_domain": "cs.cmu.edu; ; ", "email": "cs.cmu.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/koehler21a.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "Massachusetts Institute of Technology;Carnegie Mellon University", "aff_unique_dep": "Department of Mathematics;Robotics Institute", "aff_unique_url": "https://web.mit.edu;https://www.cmu.edu", "aff_unique_abbr": "MIT;CMU", "aff_campus_unique_index": "0;1;1", "aff_campus_unique": "Cambridge;Pittsburgh", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Reserve Price Optimization for First Price Auctions in Display Advertising", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9295", "id": "9295", "proceeding": "http://proceedings.mlr.press/v139/feng21b.html", "slides": "", "author_site": "Zhe Feng, S\u00e9bastien Lahaie, Jon Schneider, Jinchao Ye", "author": "Zhe Feng; Sebastien Lahaie; Jon Schneider; Jinchao Ye", "abstract": "The display advertising industry has recently transitioned from second- to first-price auctions as its primary mechanism for ad allocation and pricing. In light of this, publishers need to re-evaluate and optimize their auction parameters, notably reserve prices. In this paper, we propose a gradient-based algorithm to adaptively update and optimize reserve prices based on estimates of bidders\u2019 responsiveness to experimental shocks in reserves. Our key innovation is to draw on the inherent structure of the revenue objective in order to reduce the variance of gradient estimates and improve convergence rates in both theory and practice. We show that revenue in a first-price auction can be usefully decomposed into a \\emph{demand} component and a \\emph{bidding} component, and introduce techniques to reduce the variance of each component. We characterize the bias-variance trade-offs of these techniques and validate the performance of our proposed algorithm through experiments on synthetic data and real display ad auctions data from a major ad exchange.", "bibtex": "@InProceedings{pmlr-v139-feng21b,\n title = \t {Reserve Price Optimization for First Price Auctions in Display Advertising},\n author = {Feng, Zhe and Lahaie, Sebastien and Schneider, Jon and Ye, Jinchao},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3230--3239},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/feng21b/feng21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/feng21b.html},\n abstract = \t {The display advertising industry has recently transitioned from second- to first-price auctions as its primary mechanism for ad allocation and pricing. 
In light of this, publishers need to re-evaluate and optimize their auction parameters, notably reserve prices. In this paper, we propose a gradient-based algorithm to adaptively update and optimize reserve prices based on estimates of bidders\u2019 responsiveness to experimental shocks in reserves. Our key innovation is to draw on the inherent structure of the revenue objective in order to reduce the variance of gradient estimates and improve convergence rates in both theory and practice. We show that revenue in a first-price auction can be usefully decomposed into a \\emph{demand} component and a \\emph{bidding} component, and introduce techniques to reduce the variance of each component. We characterize the bias-variance trade-offs of these techniques and validate the performance of our proposed algorithm through experiments on synthetic data and real display ad auctions data from a major ad exchange.}\n}", "pdf": "http://proceedings.mlr.press/v139/feng21b/feng21b.pdf", "supp": "", "pdf_size": 448113, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6778190929965156424&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "Harvard University + Google Inc, NYC; Google Inc, NYC; Google Inc, NYC; Google Inc, NYC", "aff_domain": "g.harvard.edu; ; ; ", "email": "g.harvard.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/feng21b.html", "aff_unique_index": "0+1;1;1;1", "aff_unique_norm": "Harvard University;Google", "aff_unique_dep": ";Google", "aff_unique_url": "https://www.harvard.edu;https://www.google.com", "aff_unique_abbr": "Harvard;Google", "aff_campus_unique_index": "1;1;1;1", "aff_campus_unique": ";New York City", "aff_country_unique_index": "0+0;0;0;0", "aff_country_unique": "United States" }, { "title": "Resource Allocation in Multi-armed Bandit Exploration: Overcoming Sublinear Scaling with Adaptive Parallelism", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10643", "id": "10643", "proceeding": "http://proceedings.mlr.press/v139/thananjeyan21a.html", "slides": "/media/icml-2021/Slides/10643.pdf", "author_site": "Brijen Thananjeyan, Kirthevasan Kandasamy, Ion Stoica, Michael Jordan, Ken Goldberg, Joseph E Gonzalez", "author": "Brijen Thananjeyan; Kirthevasan Kandasamy; Ion Stoica; Michael Jordan; Ken Goldberg; Joseph Gonzalez", "abstract": "We study exploration in stochastic multi-armed bandits when we have access to a divisible resource that can be allocated in varying amounts to arm pulls. We focus in particular on the allocation of distributed computing resources, where we may obtain results faster by allocating more resources per pull, but might have reduced throughput due to nonlinear scaling. For example, in simulation-based scientific studies, an expensive simulation can be sped up by running it on multiple cores. This speed-up however, is partly offset by the communication among cores, which results in lower throughput than if fewer cores were allocated to run more trials in parallel. In this paper, we explore these trade-offs in two settings. First, in a fixed confidence setting, we need to find the best arm with a given target success probability as quickly as possible. We propose an algorithm which trades off between information accumulation and throughput and show that the time taken can be upper bounded by the solution of a dynamic program whose inputs are the gaps between the sub-optimal and optimal arms. We also prove a matching hardness result. 
Second, we present an algorithm for a fixed deadline setting, where we are given a time deadline and need to maximize the probability of finding the best arm. We corroborate our theoretical insights with simulation experiments that show that the algorithms consistently match or outperform baseline algorithms on a variety of problem instances.", "bibtex": "@InProceedings{pmlr-v139-thananjeyan21a,\n title = \t {Resource Allocation in Multi-armed Bandit Exploration: Overcoming Sublinear Scaling with Adaptive Parallelism},\n author = {Thananjeyan, Brijen and Kandasamy, Kirthevasan and Stoica, Ion and Jordan, Michael and Goldberg, Ken and Gonzalez, Joseph},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10236--10246},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/thananjeyan21a/thananjeyan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/thananjeyan21a.html},\n abstract = \t {We study exploration in stochastic multi-armed bandits when we have access to a divisible resource that can be allocated in varying amounts to arm pulls. We focus in particular on the allocation of distributed computing resources, where we may obtain results faster by allocating more resources per pull, but might have reduced throughput due to nonlinear scaling. For example, in simulation-based scientific studies, an expensive simulation can be sped up by running it on multiple cores. This speed-up however, is partly offset by the communication among cores, which results in lower throughput than if fewer cores were allocated to run more trials in parallel. In this paper, we explore these trade-offs in two settings. First, in a fixed confidence setting, we need to find the best arm with a given target success probability as quickly as possible. We propose an algorithm which trades off between information accumulation and throughput and show that the time taken can be upper bounded by the solution of a dynamic program whose inputs are the gaps between the sub-optimal and optimal arms. We also prove a matching hardness result. Second, we present an algorithm for a fixed deadline setting, where we are given a time deadline and need to maximize the probability of finding the best arm. 
We corroborate our theoretical insights with simulation experiments that show that the algorithms consistently match or outperform baseline algorithms on a variety of problem instances.}\n}", "pdf": "http://proceedings.mlr.press/v139/thananjeyan21a/thananjeyan21a.pdf", "supp": "", "pdf_size": 1350427, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2138785680641580678&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 6, "aff": "University of California, Berkeley; University of California, Berkeley; University of California, Berkeley; University of California, Berkeley; University of California, Berkeley; University of California, Berkeley", "aff_domain": "berkeley.edu; ; ; ; ; ", "email": "berkeley.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/thananjeyan21a.html", "aff_unique_index": "0;0;0;0;0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0;0;0;0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Rethinking Neural vs. Matrix-Factorization Collaborative Filtering: the Theoretical Perspectives", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8747", "id": "8747", "proceeding": "http://proceedings.mlr.press/v139/xu21d.html", "slides": "", "author_site": "Da Xu, Chuanwei Ruan, Evren Korpeoglu, Sushant Kumar, Kannan Achan", "author": "Da Xu; Chuanwei Ruan; Evren Korpeoglu; Sushant Kumar; Kannan Achan", "abstract": "The recent work by Rendle et al. (2020), based on empirical observations, argues that matrix-factorization collaborative filtering (MCF) compares favorably to neural collaborative filtering (NCF), and conjectures the dot product\u2019s superiority over the feed-forward neural network as similarity function. In this paper, we address the comparison rigorously by answering the following questions: 1. what is the limiting expressivity of each model; 2. under the practical gradient descent, to which solution does each optimization path converge; 3. how would the models generalize under the inductive and transductive learning setting. Our results highlight the similar expressivity for the overparameterized NCF and MCF as kernelized predictors, and reveal the relation between their optimization paths. We further show their different generalization behaviors, where MCF and NCF experience specific tradeoff and comparison in the transductive and inductive collaborative filtering setting. Lastly, by showing a novel generalization result, we reveal the critical role of correcting exposure bias for model evaluation in the inductive setting. Our results explain some of the previously observed conflicts, and we provide synthetic and real-data experiments to shed further insights to this topic.", "bibtex": "@InProceedings{pmlr-v139-xu21d,\n title = \t {Rethinking Neural vs. 
Matrix-Factorization Collaborative Filtering: the Theoretical Perspectives},\n author = {Xu, Da and Ruan, Chuanwei and Korpeoglu, Evren and Kumar, Sushant and Achan, Kannan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11514--11524},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xu21d/xu21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/xu21d.html},\n abstract = \t {The recent work by Rendle et al. (2020), based on empirical observations, argues that matrix-factorization collaborative filtering (MCF) compares favorably to neural collaborative filtering (NCF), and conjectures the dot product\u2019s superiority over the feed-forward neural network as similarity function. In this paper, we address the comparison rigorously by answering the following questions: 1. what is the limiting expressivity of each model; 2. under the practical gradient descent, to which solution does each optimization path converge; 3. how would the models generalize under the inductive and transductive learning setting. Our results highlight the similar expressivity for the overparameterized NCF and MCF as kernelized predictors, and reveal the relation between their optimization paths. We further show their different generalization behaviors, where MCF and NCF experience specific tradeoff and comparison in the transductive and inductive collaborative filtering setting. Lastly, by showing a novel generalization result, we reveal the critical role of correcting exposure bias for model evaluation in the inductive setting. Our results explain some of the previously observed conflicts, and we provide synthetic and real-data experiments to shed further insights to this topic.}\n}", "pdf": "http://proceedings.mlr.press/v139/xu21d/xu21d.pdf", "supp": "", "pdf_size": 1236108, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7089069424662537761&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Walmart Labs, Sunnyvale, California, USA; Instacart, San Francisco, California, USA + Walmart Labs, Sunnyvale, California, USA; Walmart Labs, Sunnyvale, California, USA; Walmart Labs, Sunnyvale, California, USA; Walmart Labs, Sunnyvale, California, USA", "aff_domain": "gmail.com; ; ; ; ", "email": "gmail.com; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/xu21d.html", "aff_unique_index": "0;1+0;0;0;0", "aff_unique_norm": "Walmart Labs;Instacart", "aff_unique_dep": ";", "aff_unique_url": "https://labs.walmart.com;https://www.instacart.com", "aff_unique_abbr": ";", "aff_campus_unique_index": "0;1+0;0;0;0", "aff_campus_unique": "Sunnyvale;San Francisco", "aff_country_unique_index": "0;0+0;0;0;0", "aff_country_unique": "United States" }, { "title": "Rethinking Rotated Object Detection with Gaussian Wasserstein Distance Loss", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9045", "id": "9045", "proceeding": "http://proceedings.mlr.press/v139/yang21l.html", "slides": "", "author_site": "Xue Yang, Junchi Yan, Qi Ming, Wentao Wang, xiaopeng zhang, Qi Tian", "author": "Xue Yang; Junchi Yan; Qi Ming; Wentao Wang; Xiaopeng Zhang; Qi Tian", "abstract": "Boundary discontinuity and its inconsistency to the final detection metric have been the bottleneck for rotating 
detection regression loss design. In this paper, we propose a novel regression loss based on Gaussian Wasserstein distance as a fundamental approach to solve the problem. Specifically, the rotated bounding box is converted to a 2-D Gaussian distribution, which enables the non-differentiable rotational IoU-induced loss to be approximated by the Gaussian Wasserstein distance (GWD), which can be learned efficiently by gradient back-propagation. GWD can still be informative for learning even when there is no overlap between two rotated bounding boxes, which is often the case for small object detection. Thanks to its three unique properties, GWD can also elegantly solve the boundary discontinuity and square-like problem regardless of how the bounding box is defined. Experiments on five datasets using different detectors show the effectiveness of our approach, and codes are available at https://github.com/yangxue0827/RotationDetection.", "bibtex": "@InProceedings{pmlr-v139-yang21l,\n title = \t {Rethinking Rotated Object Detection with Gaussian Wasserstein Distance Loss},\n author = {Yang, Xue and Yan, Junchi and Ming, Qi and Wang, Wentao and Zhang, Xiaopeng and Tian, Qi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11830--11841},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yang21l/yang21l.pdf},\n url = \t {https://proceedings.mlr.press/v139/yang21l.html},\n abstract = \t {Boundary discontinuity and its inconsistency to the final detection metric have been the bottleneck for rotating detection regression loss design. In this paper, we propose a novel regression loss based on Gaussian Wasserstein distance as a fundamental approach to solve the problem. Specifically, the rotated bounding box is converted to a 2-D Gaussian distribution, which enables the non-differentiable rotational IoU-induced loss to be approximated by the Gaussian Wasserstein distance (GWD), which can be learned efficiently by gradient back-propagation. GWD can still be informative for learning even when there is no overlap between two rotated bounding boxes, which is often the case for small object detection. Thanks to its three unique properties, GWD can also elegantly solve the boundary discontinuity and square-like problem regardless of how the bounding box is defined. 
Experiments on five datasets using different detectors show the effectiveness of our approach, and codes are available at https://github.com/yangxue0827/RotationDetection.}\n}", "pdf": "http://proceedings.mlr.press/v139/yang21l/yang21l.pdf", "supp": "", "pdf_size": 3336392, "gs_citation": 560, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9458084216549029781&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science and Engineering, Shanghai Jiao Tong University+MoE Key Lab of Arti\ufb01cial Intelligence, AI Institute, Shanghai Jiao Tong University+Huawei Inc.; Department of Computer Science and Engineering, Shanghai Jiao Tong University+MoE Key Lab of Arti\ufb01cial Intelligence, AI Institute, Shanghai Jiao Tong University; School of Automation, Beijing Institute of Technology; Department of Computer Science and Engineering, Shanghai Jiao Tong University; Huawei Inc.; Huawei Inc.", "aff_domain": "sjtu.edu.cn;sjtu.edu.cn; ; ; ; ", "email": "sjtu.edu.cn;sjtu.edu.cn; ; ; ; ", "github": "https://github.com/yangxue0827/RotationDetection", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/yang21l.html", "aff_unique_index": "0+0+1;0+0;2;0;1;1", "aff_unique_norm": "Shanghai Jiao Tong University;Huawei;Beijing Institute of Technology", "aff_unique_dep": "Department of Computer Science and Engineering;Huawei;School of Automation", "aff_unique_url": "https://www.sjtu.edu.cn;https://www.huawei.com;http://www.bit.edu.cn/", "aff_unique_abbr": "SJTU;Huawei;BIT", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Shanghai", "aff_country_unique_index": "0+0+0;0+0;0;0;0;0", "aff_country_unique": "China" }, { "title": "Revealing the Structure of Deep Neural Networks via Convex Duality", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10033", "id": "10033", "proceeding": "http://proceedings.mlr.press/v139/ergen21b.html", "slides": "/media/icml-2021/Slides/10033.pdf", "author_site": "Tolga Ergen, Mert Pilanci", "author": "Tolga Ergen; Mert Pilanci", "abstract": "We study regularized deep neural networks (DNNs) and introduce a convex analytic framework to characterize the structure of the hidden layers. We show that a set of optimal hidden layer weights for a norm regularized DNN training problem can be explicitly found as the extreme points of a convex set. For the special case of deep linear networks, we prove that each optimal weight matrix aligns with the previous layers via duality. More importantly, we apply the same characterization to deep ReLU networks with whitened data and prove the same weight alignment holds. As a corollary, we also prove that norm regularized deep ReLU networks yield spline interpolation for one-dimensional datasets which was previously known only for two-layer networks. Furthermore, we provide closed-form solutions for the optimal layer weights when data is rank-one or whitened. The same analysis also applies to architectures with batch normalization even for arbitrary data. 
Therefore, we obtain a complete explanation for a recent empirical observation termed Neural Collapse where class means collapse to the vertices of a simplex equiangular tight frame.", "bibtex": "@InProceedings{pmlr-v139-ergen21b,\n title = \t {Revealing the Structure of Deep Neural Networks via Convex Duality},\n author = {Ergen, Tolga and Pilanci, Mert},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3004--3014},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ergen21b/ergen21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/ergen21b.html},\n abstract = \t {We study regularized deep neural networks (DNNs) and introduce a convex analytic framework to characterize the structure of the hidden layers. We show that a set of optimal hidden layer weights for a norm regularized DNN training problem can be explicitly found as the extreme points of a convex set. For the special case of deep linear networks, we prove that each optimal weight matrix aligns with the previous layers via duality. More importantly, we apply the same characterization to deep ReLU networks with whitened data and prove the same weight alignment holds. As a corollary, we also prove that norm regularized deep ReLU networks yield spline interpolation for one-dimensional datasets which was previously known only for two-layer networks. Furthermore, we provide closed-form solutions for the optimal layer weights when data is rank-one or whitened. The same analysis also applies to architectures with batch normalization even for arbitrary data. Therefore, we obtain a complete explanation for a recent empirical observation termed Neural Collapse where class means collapse to the vertices of a simplex equiangular tight frame.}\n}", "pdf": "http://proceedings.mlr.press/v139/ergen21b/ergen21b.pdf", "supp": "", "pdf_size": 1585642, "gs_citation": 78, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2628178598188433341&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Electrical Engineering, Stanford University, CA, USA; Department of Electrical Engineering, Stanford University, CA, USA", "aff_domain": "stanford.edu; ", "email": "stanford.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/ergen21b.html", "aff_unique_index": "0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Electrical Engineering", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Revenue-Incentive Tradeoffs in Dynamic Reserve Pricing", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10011", "id": "10011", "proceeding": "http://proceedings.mlr.press/v139/deng21c.html", "slides": "", "author_site": "Yuan Deng, S\u00e9bastien Lahaie, Vahab Mirrokni, Song Zuo", "author": "Yuan Deng; Sebastien Lahaie; Vahab Mirrokni; Song Zuo", "abstract": "Online advertisements are primarily sold via repeated auctions with reserve prices. 
In this paper, we study how to set reserves to boost revenue based on the historical bids of strategic buyers, while controlling the impact of such a policy on the incentive compatibility of the repeated auctions. Adopting an incentive compatibility metric which quantifies the incentives to shade bids, we propose a novel class of reserve pricing policies and provide analytical tradeoffs between their revenue performance and bid-shading incentives. The policies are inspired by the exponential mechanism from the literature on differential privacy, but our study uncovers mechanisms with significantly better revenue-incentive tradeoffs than the exponential mechanism in practice. We further empirically evaluate the tradeoffs on synthetic data as well as real ad auction data from a major ad exchange to verify and support our theoretical findings.", "bibtex": "@InProceedings{pmlr-v139-deng21c,\n title = \t {Revenue-Incentive Tradeoffs in Dynamic Reserve Pricing},\n author = {Deng, Yuan and Lahaie, Sebastien and Mirrokni, Vahab and Zuo, Song},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2601--2610},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/deng21c/deng21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/deng21c.html},\n abstract = \t {Online advertisements are primarily sold via repeated auctions with reserve prices. In this paper, we study how to set reserves to boost revenue based on the historical bids of strategic buyers, while controlling the impact of such a policy on the incentive compatibility of the repeated auctions. Adopting an incentive compatibility metric which quantifies the incentives to shade bids, we propose a novel class of reserve pricing policies and provide analytical tradeoffs between their revenue performance and bid-shading incentives. The policies are inspired by the exponential mechanism from the literature on differential privacy, but our study uncovers mechanisms with significantly better revenue-incentive tradeoffs than the exponential mechanism in practice. 
We further empirically evaluate the tradeoffs on synthetic data as well as real ad auction data from a major ad exchange to verify and support our theoretical findings.}\n}", "pdf": "http://proceedings.mlr.press/v139/deng21c/deng21c.pdf", "supp": "", "pdf_size": 627053, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18292083168774471752&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Google Research, New York City, NY, USA; Google Research, New York City, NY, USA; Google Research, New York City, NY, USA; Google Research, New York City, NY, USA", "aff_domain": "google.com; ; ; ", "email": "google.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/deng21c.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google Research", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "New York City", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Revisiting Peng\u2019s Q($\u03bb$) for Modern Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8453", "id": "8453", "proceeding": "http://proceedings.mlr.press/v139/kozuno21a.html", "slides": "/media/icml-2021/Slides/8453.pdf", "author_site": "Tadashi Kozuno, Yunhao Tang, Mark Rowland, Remi Munos, Steven Kapturowski, Will Dabney, Michal Valko, David Abel", "author": "Tadashi Kozuno; Yunhao Tang; Mark Rowland; Remi Munos; Steven Kapturowski; Will Dabney; Michal Valko; David Abel", "abstract": "Off-policy multi-step reinforcement learning algorithms consist of conservative and non-conservative algorithms: the former actively cut traces, whereas the latter do not. Recently, Munos et al. (2016) proved the convergence of conservative algorithms to an optimal Q-function. In contrast, non-conservative algorithms are thought to be unsafe and have a limited or no theoretical guarantee. Nonetheless, recent studies have shown that non-conservative algorithms empirically outperform conservative ones. Motivated by the empirical results and the lack of theory, we carry out theoretical analyses of Peng\u2019s Q($\\lambda$), a representative example of non-conservative algorithms. We prove that \\emph{it also converges to an optimal policy} provided that the behavior policy slowly tracks a greedy policy in a way similar to conservative policy iteration. Such a result has been conjectured to be true but has not been proven. We also experiment with Peng\u2019s Q($\\lambda$) in complex continuous control tasks, confirming that Peng\u2019s Q($\\lambda$) often outperforms conservative algorithms despite its simplicity. 
These results indicate that Peng\u2019s Q($\\lambda$), which was thought to be unsafe, is a theoretically-sound and practically effective algorithm.", "bibtex": "@InProceedings{pmlr-v139-kozuno21a,\n title = \t {Revisiting Peng\u2019s Q($\u03bb$) for Modern Reinforcement Learning},\n author = {Kozuno, Tadashi and Tang, Yunhao and Rowland, Mark and Munos, Remi and Kapturowski, Steven and Dabney, Will and Valko, Michal and Abel, David},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5794--5804},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kozuno21a/kozuno21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kozuno21a.html},\n abstract = \t {Off-policy multi-step reinforcement learning algorithms consist of conservative and non-conservative algorithms: the former actively cut traces, whereas the latter do not. Recently, Munos et al. (2016) proved the convergence of conservative algorithms to an optimal Q-function. In contrast, non-conservative algorithms are thought to be unsafe and have a limited or no theoretical guarantee. Nonetheless, recent studies have shown that non-conservative algorithms empirically outperform conservative ones. Motivated by the empirical results and the lack of theory, we carry out theoretical analyses of Peng\u2019s Q($\\lambda$), a representative example of non-conservative algorithms. We prove that \\emph{it also converges to an optimal policy} provided that the behavior policy slowly tracks a greedy policy in a way similar to conservative policy iteration. Such a result has been conjectured to be true but has not been proven. We also experiment with Peng\u2019s Q($\\lambda$) in complex continuous control tasks, confirming that Peng\u2019s Q($\\lambda$) often outperforms conservative algorithms despite its simplicity. 
These results indicate that Peng\u2019s Q($\\lambda$), which was thought to be unsafe, is a theoretically-sound and practically effective algorithm.}\n}", "pdf": "http://proceedings.mlr.press/v139/kozuno21a/kozuno21a.pdf", "supp": "", "pdf_size": 3411583, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3804325348446138331&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Independent Researcher, Okayama, Japan (Now at the University of Alberta); Columbia University, NY, USA; DeepMind, London, UK; DeepMind, Paris, France; DeepMind, London, UK; DeepMind, London, UK; DeepMind, Paris, France; DeepMind, London, UK", "aff_domain": "gmail.com;columbia.edu; ; ; ; ; ; ", "email": "gmail.com;columbia.edu; ; ; ; ; ; ", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/kozuno21a.html", "aff_unique_index": "0;1;2;2;2;2;2;2", "aff_unique_norm": "University of Alberta;Columbia University;DeepMind", "aff_unique_dep": ";;", "aff_unique_url": "https://www.ualberta.ca;https://www.columbia.edu;https://deepmind.com", "aff_unique_abbr": "UAlberta;Columbia;DeepMind", "aff_campus_unique_index": "1;2;3;2;2;3;2", "aff_campus_unique": ";New York;London;Paris", "aff_country_unique_index": "0;1;2;3;2;2;3;2", "aff_country_unique": "Canada;United States;United Kingdom;France" }, { "title": "Revisiting Point Cloud Shape Classification with a Simple and Effective Baseline", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9099", "id": "9099", "proceeding": "http://proceedings.mlr.press/v139/goyal21a.html", "slides": "", "author_site": "Ankit Goyal, Hei Law, Bowei Liu, Alejandro Newell, Jia Deng", "author": "Ankit Goyal; Hei Law; Bowei Liu; Alejandro Newell; Jia Deng", "abstract": "Processing point cloud data is an important component of many real-world systems. As such, a wide variety of point-based approaches have been proposed, reporting steady benchmark improvements over time. We study the key ingredients of this progress and uncover two critical results. First, we find that auxiliary factors like different evaluation schemes, data augmentation strategies, and loss functions, which are independent of the model architecture, make a large difference in performance. The differences are large enough that they obscure the effect of architecture. When these factors are controlled for, PointNet++, a relatively older network, performs competitively with recent methods. Second, a very simple projection-based method, which we refer to as SimpleView, performs surprisingly well. It achieves on par or better results than sophisticated state-of-the-art methods on ModelNet40 while being half the size of PointNet++. It also outperforms state-of-the-art methods on ScanObjectNN, a real-world point cloud benchmark, and demonstrates better cross-dataset generalization. 
Code is available at https://github.com/princeton-vl/SimpleView.", "bibtex": "@InProceedings{pmlr-v139-goyal21a,\n title = \t {Revisiting Point Cloud Shape Classification with a Simple and Effective Baseline},\n author = {Goyal, Ankit and Law, Hei and Liu, Bowei and Newell, Alejandro and Deng, Jia},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3809--3820},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/goyal21a/goyal21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/goyal21a.html},\n abstract = \t {Processing point cloud data is an important component of many real-world systems. As such, a wide variety of point-based approaches have been proposed, reporting steady benchmark improvements over time. We study the key ingredients of this progress and uncover two critical results. First, we find that auxiliary factors like different evaluation schemes, data augmentation strategies, and loss functions, which are independent of the model architecture, make a large difference in performance. The differences are large enough that they obscure the effect of architecture. When these factors are controlled for, PointNet++, a relatively older network, performs competitively with recent methods. Second, a very simple projection-based method, which we refer to as SimpleView, performs surprisingly well. It achieves on par or better results than sophisticated state-of-the-art methods on ModelNet40 while being half the size of PointNet++. It also outperforms state-of-the-art methods on ScanObjectNN, a real-world point cloud benchmark, and demonstrates better cross-dataset generalization. 
Code is available at https://github.com/princeton-vl/SimpleView.}\n}", "pdf": "http://proceedings.mlr.press/v139/goyal21a/goyal21a.pdf", "supp": "", "pdf_size": 595058, "gs_citation": 295, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17283112957651231327&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science, Princeton University, NJ, USA; Department of Computer Science, Princeton University, NJ, USA; Department of Computer Science, Princeton University, NJ, USA; Department of Computer Science, Princeton University, NJ, USA; Department of Computer Science, Princeton University, NJ, USA", "aff_domain": "princeton.edu; ; ; ; ", "email": "princeton.edu; ; ; ; ", "github": "https://github.com/princeton-vl/SimpleView", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/goyal21a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Princeton University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.princeton.edu", "aff_unique_abbr": "Princeton", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Revisiting Rainbow: Promoting more insightful and inclusive deep reinforcement learning research", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9069", "id": "9069", "proceeding": "http://proceedings.mlr.press/v139/ceron21a.html", "slides": "", "author_site": "Johan Obando Ceron, Pablo Samuel Castro", "author": "Johan Samir Obando Ceron; Pablo Samuel Castro", "abstract": "Since the introduction of DQN, a vast majority of reinforcement learning research has focused on reinforcement learning with deep neural networks as function approximators. New methods are typically evaluated on a set of environments that have now become standard, such as Atari 2600 games. While these benchmarks help standardize evaluation, their computational cost has the unfortunate side effect of widening the gap between those with ample access to computational resources, and those without. In this work we argue that, despite the community\u2019s emphasis on large-scale environments, the traditional small-scale environments can still yield valuable scientific insights and can help reduce the barriers to entry for underprivileged communities. To substantiate our claims, we empirically revisit the paper which introduced the Rainbow algorithm [Hessel et al., 2018] and present some new insights into the algorithms used by Rainbow.", "bibtex": "@InProceedings{pmlr-v139-ceron21a,\n title = \t {Revisiting Rainbow: Promoting more insightful and inclusive deep reinforcement learning research},\n author = {Ceron, Johan Samir Obando and Castro, Pablo Samuel},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1373--1383},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ceron21a/ceron21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ceron21a.html},\n abstract = \t {Since the introduction of DQN, a vast majority of reinforcement learning research has focused on reinforcement learning with deep neural networks as function approximators. 
New methods are typically evaluated on a set of environments that have now become standard, such as Atari 2600 games. While these benchmarks help standardize evaluation, their computational cost has the unfortunate side effect of widening the gap between those with ample access to computational resources, and those without. In this work we argue that, despite the community\u2019s emphasis on large-scale environments, the traditional small-scale environments can still yield valuable scientific insights and can help reduce the barriers to entry for underprivileged communities. To substantiate our claims, we empirically revisit the paper which introduced the Rainbow algorithm [Hessel et al., 2018] and present some new insights into the algorithms used by Rainbow.}\n}", "pdf": "http://proceedings.mlr.press/v139/ceron21a/ceron21a.pdf", "supp": "", "pdf_size": 6622793, "gs_citation": 134, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12882829322787597157&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 2, "aff": "Google Research, Brain Team; Google Research, Brain Team", "aff_domain": "gmail.com;google.com", "email": "gmail.com;google.com", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/ceron21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Reward Identification in Inverse Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10341", "id": "10341", "proceeding": "http://proceedings.mlr.press/v139/kim21c.html", "slides": "/media/icml-2021/Slides/10341.pdf", "author_site": "Kuno Kim, Shivam Garg, Kirankumar Shiragur, Stefano Ermon", "author": "Kuno Kim; Shivam Garg; Kirankumar Shiragur; Stefano Ermon", "abstract": "We study the problem of reward identifiability in the context of Inverse Reinforcement Learning (IRL). The reward identifiability question is critical to answer when reasoning about the effectiveness of using Markov Decision Processes (MDPs) as computational models of real world decision makers in order to understand complex decision making behavior and perform counterfactual reasoning. While identifiability has been acknowledged as a fundamental theoretical question in IRL, little is known about the types of MDPs for which rewards are identifiable, or even if there exist such MDPs. In this work, we formalize the reward identification problem in IRL and study how identifiability relates to properties of the MDP model. For deterministic MDP models with the MaxEntRL objective, we prove necessary and sufficient conditions for identifiability. 
Building on these results, we present efficient algorithms for testing whether or not an MDP model is identifiable.", "bibtex": "@InProceedings{pmlr-v139-kim21c,\n title = \t {Reward Identification in Inverse Reinforcement Learning},\n author = {Kim, Kuno and Garg, Shivam and Shiragur, Kirankumar and Ermon, Stefano},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5496--5505},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kim21c/kim21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/kim21c.html},\n abstract = \t {We study the problem of reward identifiability in the context of Inverse Reinforcement Learning (IRL). The reward identifiability question is critical to answer when reasoning about the effectiveness of using Markov Decision Processes (MDPs) as computational models of real world decision makers in order to understand complex decision making behavior and perform counterfactual reasoning. While identifiability has been acknowledged as a fundamental theoretical question in IRL, little is known about the types of MDPs for which rewards are identifiable, or even if there exist such MDPs. In this work, we formalize the reward identification problem in IRL and study how identifiability relates to properties of the MDP model. For deterministic MDP models with the MaxEntRL objective, we prove necessary and sufficient conditions for identifiability. Building on these results, we present efficient algorithms for testing whether or not an MDP model is identifiable.}\n}", "pdf": "http://proceedings.mlr.press/v139/kim21c/kim21c.pdf", "supp": "", "pdf_size": 724931, "gs_citation": 51, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3652465346725241512&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Department of Computer Science, Stanford University", "aff_domain": "cs.stanford.edu; ; ; ", "email": "cs.stanford.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/kim21c.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Riemannian Convex Potential Maps", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9033", "id": "9033", "proceeding": "http://proceedings.mlr.press/v139/cohen21a.html", "slides": "", "author_site": "samuel cohen, Brandon Amos, Yaron Lipman", "author": "Samuel Cohen; Brandon Amos; Yaron Lipman", "abstract": "Modeling distributions on Riemannian manifolds is a crucial component in understanding non-Euclidean data that arises, e.g., in physics and geology. The budding approaches in this space are limited by representational and computational tradeoffs. We propose and study a class of flows that uses convex potentials from Riemannian optimal transport. 
These are universal and can model distributions on any compact Riemannian manifold without requiring domain knowledge of the manifold to be integrated into the architecture. We demonstrate that these flows can model standard distributions on spheres, and tori, on synthetic and geological data.", "bibtex": "@InProceedings{pmlr-v139-cohen21a,\n title = \t {Riemannian Convex Potential Maps},\n author = {Cohen, Samuel and Amos, Brandon and Lipman, Yaron},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2028--2038},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cohen21a/cohen21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/cohen21a.html},\n abstract = \t {Modeling distributions on Riemannian manifolds is a crucial component in understanding non-Euclidean data that arises, e.g., in physics and geology. The budding approaches in this space are limited by representational and computational tradeoffs. We propose and study a class of flows that uses convex potentials from Riemannian optimal transport. These are universal and can model distributions on any compact Riemannian manifold without requiring domain knowledge of the manifold to be integrated into the architecture. We demonstrate that these flows can model standard distributions on spheres, and tori, on synthetic and geological data.}\n}", "pdf": "http://proceedings.mlr.press/v139/cohen21a/cohen21a.pdf", "supp": "", "pdf_size": 1676236, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8877178841663842639&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "github.com/facebookresearch/rcpm", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/cohen21a.html" }, { "title": "Risk Bounds and Rademacher Complexity in Batch Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10623", "id": "10623", "proceeding": "http://proceedings.mlr.press/v139/duan21a.html", "slides": "/media/icml-2021/Slides/10623.pdf", "author_site": "Yaqi Duan, Chi Jin, Zhiyuan Li", "author": "Yaqi Duan; Chi Jin; Zhiyuan Li", "abstract": "This paper considers batch Reinforcement Learning (RL) with general value function approximation. Our study investigates the minimal assumptions to reliably estimate/minimize Bellman error, and characterizes the generalization performance by (local) Rademacher complexities of general function classes, which makes initial steps in bridging the gap between statistical learning theory and batch RL. Concretely, we view the Bellman error as a surrogate loss for the optimality gap, and prove the followings: (1) In double sampling regime, the excess risk of Empirical Risk Minimizer (ERM) is bounded by the Rademacher complexity of the function class. (2) In the single sampling regime, sample-efficient risk minimization is not possible without further assumptions, regardless of algorithms. However, with completeness assumptions, the excess risk of FQI and a minimax style algorithm can be again bounded by the Rademacher complexity of the corresponding function classes. (3) Fast statistical rates can be achieved by using tools of local Rademacher complexity. 
Our analysis covers a wide range of function classes, including finite classes, linear spaces, kernel spaces, sparse linear features, etc.", "bibtex": "@InProceedings{pmlr-v139-duan21a,\n title = \t {Risk Bounds and Rademacher Complexity in Batch Reinforcement Learning},\n author = {Duan, Yaqi and Jin, Chi and Li, Zhiyuan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2892--2902},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/duan21a/duan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/duan21a.html},\n abstract = \t {This paper considers batch Reinforcement Learning (RL) with general value function approximation. Our study investigates the minimal assumptions to reliably estimate/minimize Bellman error, and characterizes the generalization performance by (local) Rademacher complexities of general function classes, which makes initial steps in bridging the gap between statistical learning theory and batch RL. Concretely, we view the Bellman error as a surrogate loss for the optimality gap, and prove the followings: (1) In double sampling regime, the excess risk of Empirical Risk Minimizer (ERM) is bounded by the Rademacher complexity of the function class. (2) In the single sampling regime, sample-efficient risk minimization is not possible without further assumptions, regardless of algorithms. However, with completeness assumptions, the excess risk of FQI and a minimax style algorithm can be again bounded by the Rademacher complexity of the corresponding function classes. (3) Fast statistical rates can be achieved by using tools of local Rademacher complexity. Our analysis covers a wide range of function classes, including finite classes, linear spaces, kernel spaces, sparse linear features, etc.}\n}", "pdf": "http://proceedings.mlr.press/v139/duan21a/duan21a.pdf", "supp": "", "pdf_size": 408528, "gs_citation": 63, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13720687763334694759&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Operations Research and Financial Engineering, Princeton University; Department of Electrical and Computer Engineering, Princeton University; Department of Computer Science, Princeton University", "aff_domain": "princeton.edu;princeton.edu;cs.princeton.edu", "email": "princeton.edu;princeton.edu;cs.princeton.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/duan21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Princeton University", "aff_unique_dep": "Department of Operations Research and Financial Engineering", "aff_unique_url": "https://www.princeton.edu", "aff_unique_abbr": "Princeton", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Risk-Sensitive Reinforcement Learning with Function Approximation: A Debiasing Approach", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9329", "id": "9329", "proceeding": "http://proceedings.mlr.press/v139/fei21a.html", "slides": "", "author_site": "Yingjie Fei, Zhuoran Yang, Zhaoran Wang", "author": "Yingjie Fei; Zhuoran Yang; Zhaoran Wang", "abstract": "We study function approximation for episodic reinforcement learning with entropic risk measure. 
We first propose an algorithm with linear function approximation. Compared to existing algorithms, which suffer from improper regularization and regression biases, this algorithm features debiasing transformations in backward induction and regression procedures. We further propose an algorithm with general function approximation, which features implicit debiasing transformations. We prove that both algorithms achieve a sublinear regret and demonstrate a trade-off between generality and efficiency. Our analysis provides a unified framework for function approximation in risk-sensitive reinforcement learning, which leads to the first sublinear regret bounds in the setting.", "bibtex": "@InProceedings{pmlr-v139-fei21a,\n title = \t {Risk-Sensitive Reinforcement Learning with Function Approximation: A Debiasing Approach},\n author = {Fei, Yingjie and Yang, Zhuoran and Wang, Zhaoran},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3198--3207},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fei21a/fei21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/fei21a.html},\n abstract = \t {We study function approximation for episodic reinforcement learning with entropic risk measure. We first propose an algorithm with linear function approximation. Compared to existing algorithms, which suffer from improper regularization and regression biases, this algorithm features debiasing transformations in backward induction and regression procedures. We further propose an algorithm with general function approximation, which features implicit debiasing transformations. We prove that both algorithms achieve a sublinear regret and demonstrate a trade-off between generality and efficiency. 
Our analysis provides a unified framework for function approximation in risk-sensitive reinforcement learning, which leads to the first sublinear regret bounds in the setting.}\n}", "pdf": "http://proceedings.mlr.press/v139/fei21a/fei21a.pdf", "supp": "", "pdf_size": 334035, "gs_citation": 57, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2034722515867378506&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Northwestern University, Evanston, Illinois, USA+Princeton University, Princeton, New Jersey, USA; Princeton University, Princeton, New Jersey, USA; Northwestern University, Evanston, Illinois, USA", "aff_domain": "cornell.edu; ; ", "email": "cornell.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/fei21a.html", "aff_unique_index": "0+1;1;0", "aff_unique_norm": "Northwestern University;Princeton University", "aff_unique_dep": ";", "aff_unique_url": "https://www.northwestern.edu;https://www.princeton.edu", "aff_unique_abbr": "NU;Princeton", "aff_campus_unique_index": "0+1;1;0", "aff_campus_unique": "Evanston;Princeton", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "United States" }, { "title": "Rissanen Data Analysis: Examining Dataset Characteristics via Description Length", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10437", "id": "10437", "proceeding": "http://proceedings.mlr.press/v139/perez21a.html", "slides": "/media/icml-2021/Slides/10437_b1d2n3V.pdf", "author_site": "Ethan Perez, Douwe Kiela, Kyunghyun Cho", "author": "Ethan Perez; Douwe Kiela; Kyunghyun Cho", "abstract": "We introduce a method to determine if a certain capability helps to achieve an accurate model of given data. We view labels as being generated from the inputs by a program composed of subroutines with different capabilities, and we posit that a subroutine is useful if and only if the minimal program that invokes it is shorter than the one that does not. Since minimum program length is uncomputable, we instead estimate the labels\u2019 minimum description length (MDL) as a proxy, giving us a theoretically-grounded method for analyzing dataset characteristics. We call the method Rissanen Data Analysis (RDA) after the father of MDL, and we showcase its applicability on a wide variety of settings in NLP, ranging from evaluating the utility of generating subquestions before answering a question, to analyzing the value of rationales and explanations, to investigating the importance of different parts of speech, and uncovering dataset gender bias.", "bibtex": "@InProceedings{pmlr-v139-perez21a,\n title = \t {Rissanen Data Analysis: Examining Dataset Characteristics via Description Length},\n author = {Perez, Ethan and Kiela, Douwe and Cho, Kyunghyun},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8500--8513},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/perez21a/perez21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/perez21a.html},\n abstract = \t {We introduce a method to determine if a certain capability helps to achieve an accurate model of given data. 
We view labels as being generated from the inputs by a program composed of subroutines with different capabilities, and we posit that a subroutine is useful if and only if the minimal program that invokes it is shorter than the one that does not. Since minimum program length is uncomputable, we instead estimate the labels\u2019 minimum description length (MDL) as a proxy, giving us a theoretically-grounded method for analyzing dataset characteristics. We call the method Rissanen Data Analysis (RDA) after the father of MDL, and we showcase its applicability on a wide variety of settings in NLP, ranging from evaluating the utility of generating subquestions before answering a question, to analyzing the value of rationales and explanations, to investigating the importance of different parts of speech, and uncovering dataset gender bias.}\n}", "pdf": "http://proceedings.mlr.press/v139/perez21a/perez21a.pdf", "supp": "", "pdf_size": 1277689, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5428264289372921149&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "New York University; Facebook AI Research; New York University + CIFAR Fellow in Learning in Machines & Brains", "aff_domain": "nyu.edu; ; ", "email": "nyu.edu; ; ", "github": "https://github.com/ethanjperez/rda", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/perez21a.html", "aff_unique_index": "0;1;0+2", "aff_unique_norm": "New York University;Meta;CIFAR", "aff_unique_dep": ";Facebook AI Research;Learning in Machines & Brains", "aff_unique_url": "https://www.nyu.edu;https://research.facebook.com;https://www.cifar.ca", "aff_unique_abbr": "NYU;FAIR;CIFAR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0+1", "aff_country_unique": "United States;Canada" }, { "title": "Robust Asymmetric Learning in POMDPs", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9327", "id": "9327", "proceeding": "http://proceedings.mlr.press/v139/warrington21a.html", "slides": "", "author_site": "Andrew Warrington, Jonathan Lavington, Adam Scibior, Mark Schmidt, Frank Wood", "author": "Andrew Warrington; Jonathan W Lavington; Adam Scibior; Mark Schmidt; Frank Wood", "abstract": "Policies for partially observed Markov decision processes can be efficiently learned by imitating expert policies generated using asymmetric information. Unfortunately, existing approaches for this kind of imitation learning have a serious flaw: the expert does not know what the trainee cannot see, and as a result may encourage actions that are sub-optimal or unsafe under partial information. To address this issue, we derive an update which, when applied iteratively to an expert, maximizes the expected reward of the trainee\u2019s policy. Using this update, we construct a computationally efficient algorithm, adaptive asymmetric DAgger (A2D), that jointly trains the expert and trainee policies. 
We then show that A2D allows the trainee to safely imitate the modified expert, and outperforms policies learned either by imitating a fixed expert or through direct reinforcement learning.", "bibtex": "@InProceedings{pmlr-v139-warrington21a,\n title = \t {Robust Asymmetric Learning in POMDPs},\n author = {Warrington, Andrew and Lavington, Jonathan W and Scibior, Adam and Schmidt, Mark and Wood, Frank},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11013--11023},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/warrington21a/warrington21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/warrington21a.html},\n abstract = \t {Policies for partially observed Markov decision processes can be efficiently learned by imitating expert policies generated using asymmetric information. Unfortunately, existing approaches for this kind of imitation learning have a serious flaw: the expert does not know what the trainee cannot see, and as a result may encourage actions that are sub-optimal or unsafe under partial information. To address this issue, we derive an update which, when applied iteratively to an expert, maximizes the expected reward of the trainee\u2019s policy. Using this update, we construct a computationally efficient algorithm, adaptive asymmetric DAgger (A2D), that jointly trains the expert and trainee policies. We then show that A2D allows the trainee to safely imitate the modified expert, and outperforms policies learned either by imitating a fixed expert or through direct reinforcement learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/warrington21a/warrington21a.pdf", "supp": "", "pdf_size": 2581832, "gs_citation": 41, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3140825517966878728&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Engineering Science, University of Oxford; Department of Computer Science, University of British Columbia + Inverted AI + Montr\u00e9al Institute for Learning Algorithms (MILA); Inverted AI + Montr\u00e9al Institute for Learning Algorithms (MILA); Department of Computer Science, University of British Columbia + Inverted AI + Alberta Machine Learning Intelligence Institute (AMII); Alberta Machine Learning Intelligence Institute (AMII)", "aff_domain": "robots.ox.ac.uk; ; ; ; ", "email": "robots.ox.ac.uk; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/warrington21a.html", "aff_unique_index": "0;1+2+3;2+3;1+2+4;4", "aff_unique_norm": "University of Oxford;University of British Columbia;Inverted AI;Montr\u00e9al Institute for Learning Algorithms;Alberta Machine Learning Intelligence Institute", "aff_unique_dep": "Department of Engineering Science;Department of Computer Science;;;Machine Learning Intelligence", "aff_unique_url": "https://www.ox.ac.uk;https://www.ubc.ca;https://www.inverted.ai;https://mila.quebec;https://www.amii.ca", "aff_unique_abbr": "Oxford;UBC;Inverted AI;MILA;AMII", "aff_campus_unique_index": "0;1;;1", "aff_campus_unique": "Oxford;Vancouver;", "aff_country_unique_index": "0;1+2+1;2+1;1+2+1;1", "aff_country_unique": "United Kingdom;Canada;United States" }, { "title": "Robust Density Estimation from Batches: The Best Things in Life are (Nearly) Free", "status": "Oral", "track": "main", "site": 
"https://icml.cc/virtual/2021/poster/9439", "id": "9439", "proceeding": "http://proceedings.mlr.press/v139/jain21a.html", "slides": "", "author_site": "Ayush Jain, Alon Orlitsky", "author": "Ayush Jain; Alon Orlitsky", "abstract": "In many applications data are collected in batches, some potentially biased, corrupt, or even adversarial. Learning algorithms for this setting have therefore garnered considerable recent attention. In particular, a sequence of works has shown that all approximately piecewise polynomial distributions\u2014and in particular all Gaussian, Gaussian-mixture, log-concave, low-modal, and monotone-hazard distributions\u2014can be learned robustly in polynomial time. However, these results left open the question, stated explicitly in\u00a0\\cite{chen2020learning}, about the best possible sample complexity of such algorithms. We answer this question, showing that, perhaps surprisingly, up to logarithmic factors, the optimal sample complexity is the same as for genuine, non-adversarial, data! To establish the result, we reduce robust learning of approximately piecewise polynomial distributions to robust learning of the probability of all subsets of size at most $k$ of a larger discrete domain, and learn these probabilities in optimal sample complexity linear in $k$ regardless of the domain size. In simulations, the algorithm runs very quickly and estimates distributions to essentially the accuracy achieved when all adversarial batches are removed. The results also imply the first polynomial-time sample-optimal algorithm for robust interval-based classification based on batched data.", "bibtex": "@InProceedings{pmlr-v139-jain21a,\n title = \t {Robust Density Estimation from Batches: The Best Things in Life are (Nearly) Free},\n author = {Jain, Ayush and Orlitsky, Alon},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4698--4708},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jain21a/jain21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jain21a.html},\n abstract = \t {In many applications data are collected in batches, some potentially biased, corrupt, or even adversarial. Learning algorithms for this setting have therefore garnered considerable recent attention. In particular, a sequence of works has shown that all approximately piecewise polynomial distributions\u2014and in particular all Gaussian, Gaussian-mixture, log-concave, low-modal, and monotone-hazard distributions\u2014can be learned robustly in polynomial time. However, these results left open the question, stated explicitly in\u00a0\\cite{chen2020learning}, about the best possible sample complexity of such algorithms. We answer this question, showing that, perhaps surprisingly, up to logarithmic factors, the optimal sample complexity is the same as for genuine, non-adversarial, data! To establish the result, we reduce robust learning of approximately piecewise polynomial distributions to robust learning of the probability of all subsets of size at most $k$ of a larger discrete domain, and learn these probabilities in optimal sample complexity linear in $k$ regardless of the domain size. In simulations, the algorithm runs very quickly and estimates distributions to essentially the accuracy achieved when all adversarial batches are removed. 
The results also imply the first polynomial-time sample-optimal algorithm for robust interval-based classification based on batched data.}\n}", "pdf": "http://proceedings.mlr.press/v139/jain21a/jain21a.pdf", "supp": "", "pdf_size": 1788812, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11409436065244869623&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "University of California, San Diego; University of California, San Diego", "aff_domain": "eng.ucsd.edu;eng.ucsd.edu", "email": "eng.ucsd.edu;eng.ucsd.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/jain21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, San Diego", "aff_unique_dep": "", "aff_unique_url": "https://www.ucsd.edu", "aff_unique_abbr": "UCSD", "aff_campus_unique_index": "0;0", "aff_campus_unique": "San Diego", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Robust Inference for High-Dimensional Linear Models via Residual Randomization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9141", "id": "9141", "proceeding": "http://proceedings.mlr.press/v139/wang21m.html", "slides": "", "author_site": "Y. Samuel Wang, Si Kai Lee, Panos Toulis, Mladen Kolar", "author": "Y. Samuel Wang; Si Kai Lee; Panos Toulis; Mladen Kolar", "abstract": "We propose a residual randomization procedure designed for robust inference using Lasso estimates in the high-dimensional setting. Compared to earlier work that focuses on sub-Gaussian errors, the proposed procedure is designed to work robustly in settings that also include heavy-tailed covariates and errors. Moreover, our procedure can be valid under clustered errors, which is important in practice, but has been largely overlooked by earlier work. Through extensive simulations, we illustrate our method\u2019s wider range of applicability as suggested by theory. In particular, we show that our method outperforms state-of-art methods in challenging, yet more realistic, settings where the distribution of covariates is heavy-tailed or the sample size is small, while it remains competitive in standard, \u201cwell behaved\" settings previously studied in the literature.", "bibtex": "@InProceedings{pmlr-v139-wang21m,\n title = \t {Robust Inference for High-Dimensional Linear Models via Residual Randomization},\n author = {Wang, Y. Samuel and Lee, Si Kai and Toulis, Panos and Kolar, Mladen},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10805--10815},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21m/wang21m.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21m.html},\n abstract = \t {We propose a residual randomization procedure designed for robust inference using Lasso estimates in the high-dimensional setting. Compared to earlier work that focuses on sub-Gaussian errors, the proposed procedure is designed to work robustly in settings that also include heavy-tailed covariates and errors. Moreover, our procedure can be valid under clustered errors, which is important in practice, but has been largely overlooked by earlier work. Through extensive simulations, we illustrate our method\u2019s wider range of applicability as suggested by theory. 
In particular, we show that our method outperforms state-of-art methods in challenging, yet more realistic, settings where the distribution of covariates is heavy-tailed or the sample size is small, while it remains competitive in standard, \u201cwell behaved\" settings previously studied in the literature.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21m/wang21m.pdf", "supp": "", "pdf_size": 630913, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7848775259409033077&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Booth School of Business, University of Chicago; Booth School of Business, University of Chicago; Booth School of Business, University of Chicago; Booth School of Business, University of Chicago", "aff_domain": "uchicago.edu;uchicago.edu; ; ", "email": "uchicago.edu;uchicago.edu; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/wang21m.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Chicago", "aff_unique_dep": "Booth School of Business", "aff_unique_url": "https://www.chicagobooth.edu", "aff_unique_abbr": "Chicago Booth", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Chicago", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Robust Learning for Data Poisoning Attacks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10183", "id": "10183", "proceeding": "http://proceedings.mlr.press/v139/wang21r.html", "slides": "/media/icml-2021/Slides/10183.pdf", "author_site": "Yunjuan Wang, Poorya Mianjy, Raman Arora", "author": "Yunjuan Wang; Poorya Mianjy; Raman Arora", "abstract": "We investigate the robustness of stochastic approximation approaches against data poisoning attacks. We focus on two-layer neural networks with ReLU activation and show that under a specific notion of separability in the RKHS induced by the infinite-width network, training (finite-width) networks with stochastic gradient descent is robust against data poisoning attacks. Interestingly, we find that in addition to a lower bound on the width of the network, which is standard in the literature, we also require a distribution-dependent upper bound on the width for robust generalization. We provide extensive empirical evaluations that support and validate our theoretical results.", "bibtex": "@InProceedings{pmlr-v139-wang21r,\n title = \t {Robust Learning for Data Poisoning Attacks},\n author = {Wang, Yunjuan and Mianjy, Poorya and Arora, Raman},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10859--10869},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21r/wang21r.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21r.html},\n abstract = \t {We investigate the robustness of stochastic approximation approaches against data poisoning attacks. We focus on two-layer neural networks with ReLU activation and show that under a specific notion of separability in the RKHS induced by the infinite-width network, training (finite-width) networks with stochastic gradient descent is robust against data poisoning attacks. 
Interestingly, we find that in addition to a lower bound on the width of the network, which is standard in the literature, we also require a distribution-dependent upper bound on the width for robust generalization. We provide extensive empirical evaluations that support and validate our theoretical results.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21r/wang21r.pdf", "supp": "", "pdf_size": 2915666, "gs_citation": 49, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15738759785276050535&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, Johns Hopkins University, Baltimore, MD, USA; Department of Computer Science, Johns Hopkins University, Baltimore, MD, USA; Department of Computer Science, Johns Hopkins University, Baltimore, MD, USA", "aff_domain": "cs.jhu.edu;cs.jhu.edu;cs.jhu.edu", "email": "cs.jhu.edu;cs.jhu.edu;cs.jhu.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/wang21r.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Johns Hopkins University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.jhu.edu", "aff_unique_abbr": "JHU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Baltimore", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Robust Learning-Augmented Caching: An Experimental Study", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8581", "id": "8581", "proceeding": "http://proceedings.mlr.press/v139/chledowski21a.html", "slides": "", "author_site": "Jakub Ch\u0142\u0119dowski, Adam Polak, Bartosz Szabucki, Konrad Zolna", "author": "Jakub Ch\u0142\u0119dowski; Adam Polak; Bartosz Szabucki; Konrad Tomasz \u017bo\u0142na", "abstract": "Effective caching is crucial for performance of modern-day computing systems. A key optimization problem arising in caching \u2013 which item to evict to make room for a new item \u2013 cannot be optimally solved without knowing the future. There are many classical approximation algorithms for this problem, but more recently researchers started to successfully apply machine learning to decide what to evict by discovering implicit input patterns and predicting the future. While machine learning typically does not provide any worst-case guarantees, the new field of learning-augmented algorithms proposes solutions which leverage classical online caching algorithms to make the machine-learned predictors robust. We are the first to comprehensively evaluate these learning-augmented algorithms on real-world caching datasets and state-of-the-art machine-learned predictors. 
We show that a straightforward method \u2013 blindly following either a predictor or a classical robust algorithm, and switching whenever one becomes worse than the other \u2013 has only a low overhead over a well-performing predictor, while competing with classical methods when the coupled predictor fails, thus providing a cheap worst-case insurance.", "bibtex": "@InProceedings{pmlr-v139-chledowski21a,\n title = \t {Robust Learning-Augmented Caching: An Experimental Study},\n author = {Ch{\\l}{\\k{e}}dowski, Jakub and Polak, Adam and Szabucki, Bartosz and {\\.Z}o{\\l}na, Konrad Tomasz},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1920--1930},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chledowski21a/chledowski21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/chledowski21a.html},\n abstract = \t {Effective caching is crucial for performance of modern-day computing systems. A key optimization problem arising in caching \u2013 which item to evict to make room for a new item \u2013 cannot be optimally solved without knowing the future. There are many classical approximation algorithms for this problem, but more recently researchers started to successfully apply machine learning to decide what to evict by discovering implicit input patterns and predicting the future. While machine learning typically does not provide any worst-case guarantees, the new field of learning-augmented algorithms proposes solutions which leverage classical online caching algorithms to make the machine-learned predictors robust. We are the first to comprehensively evaluate these learning-augmented algorithms on real-world caching datasets and state-of-the-art machine-learned predictors. 
We show that a straightforward method \u2013 blindly following either a predictor or a classical robust algorithm, and switching whenever one becomes worse than the other \u2013 has only a low overhead over a well-performing predictor, while competing with classical methods when the coupled predictor fails, thus providing a cheap worst-case insurance.}\n}", "pdf": "http://proceedings.mlr.press/v139/chledowski21a/chledowski21a.pdf", "supp": "", "pdf_size": 3011787, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7732162850430458310&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Jagiellonian University, Krak\u00f3w, Poland+DeepMind, London, United Kingdom; EPFL, Lausanne, Switzerland; Jagiellonian University, Krak\u00f3w, Poland+DeepMind, London, United Kingdom; Jagiellonian University, Krak\u00f3w, Poland+DeepMind, London, United Kingdom", "aff_domain": "gmail.com;epfl.ch;gmail.com;gmail.com", "email": "gmail.com;epfl.ch;gmail.com;gmail.com", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/chledowski21a.html", "aff_unique_index": "0+1;2;0+1;0+1", "aff_unique_norm": "Jagiellonian University;DeepMind;EPFL", "aff_unique_dep": ";;", "aff_unique_url": "https://www.uj.edu.pl;https://deepmind.com;https://www.epfl.ch", "aff_unique_abbr": "UJ;DeepMind;EPFL", "aff_campus_unique_index": "0+1;2;0+1;0+1", "aff_campus_unique": "Krak\u00f3w;London;Lausanne", "aff_country_unique_index": "0+1;2;0+1;0+1", "aff_country_unique": "Poland;United Kingdom;Switzerland" }, { "title": "Robust Policy Gradient against Strong Data Corruption", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10179", "id": "10179", "proceeding": "http://proceedings.mlr.press/v139/zhang21d.html", "slides": "", "author_site": "Xuezhou Zhang, Yiding Chen, Jerry Zhu, Wen Sun", "author": "Xuezhou Zhang; Yiding Chen; Xiaojin Zhu; Wen Sun", "abstract": "We study the problem of robust reinforcement learning under adversarial corruption on both rewards and transitions. Our attack model assumes an \\textit{adaptive} adversary who can arbitrarily corrupt the reward and transition at every step within an episode, for at most $\\epsilon$-fraction of the learning episodes. Our attack model is strictly stronger than those considered in prior works. Our first result shows that no algorithm can find a better than $O(\\epsilon)$-optimal policy under our attack model. Next, we show that surprisingly the natural policy gradient (NPG) method retains a natural robustness property if the reward corruption is bounded, and can find an $O(\\sqrt{\\epsilon})$-optimal policy. Consequently, we develop a Filtered Policy Gradient (FPG) algorithm that can tolerate even unbounded reward corruption and can find an $O(\\epsilon^{1/4})$-optimal policy. We emphasize that FPG is the first that can achieve a meaningful learning guarantee when a constant fraction of episodes are corrupted. 
Complementary to the theoretical results, we show that a neural implementation of FPG achieves strong robust learning performance on the MuJoCo continuous control benchmarks.", "bibtex": "@InProceedings{pmlr-v139-zhang21d,\n title = \t {Robust Policy Gradient against Strong Data Corruption},\n author = {Zhang, Xuezhou and Chen, Yiding and Zhu, Xiaojin and Sun, Wen},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12391--12401},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21d/zhang21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21d.html},\n abstract = \t {We study the problem of robust reinforcement learning under adversarial corruption on both rewards and transitions. Our attack model assumes an \\textit{adaptive} adversary who can arbitrarily corrupt the reward and transition at every step within an episode, for at most $\\epsilon$-fraction of the learning episodes. Our attack model is strictly stronger than those considered in prior works. Our first result shows that no algorithm can find a better than $O(\\epsilon)$-optimal policy under our attack model. Next, we show that surprisingly the natural policy gradient (NPG) method retains a natural robustness property if the reward corruption is bounded, and can find an $O(\\sqrt{\\epsilon})$-optimal policy. Consequently, we develop a Filtered Policy Gradient (FPG) algorithm that can tolerate even unbounded reward corruption and can find an $O(\\epsilon^{1/4})$-optimal policy. We emphasize that FPG is the first that can achieve a meaningful learning guarantee when a constant fraction of episodes are corrupted. 
Complementary to the theoretical results, we show that a neural implementation of FPG achieves strong robust learning performance on the MuJoCo continuous control benchmarks.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21d/zhang21d.pdf", "supp": "", "pdf_size": 3969088, "gs_citation": 47, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5709291198914313258&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Computer Sciences, University of Wisconsin\u2013Madison; Department of Computer Sciences, University of Wisconsin\u2013Madison; Department of Computer Sciences, University of Wisconsin\u2013Madison; Cornell University", "aff_domain": "wisc.edu; ; ; ", "email": "wisc.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/zhang21d.html", "aff_unique_index": "0;0;0;1", "aff_unique_norm": "University of Wisconsin\u2013Madison;Cornell University", "aff_unique_dep": "Department of Computer Sciences;", "aff_unique_url": "https://www.wisc.edu;https://www.cornell.edu", "aff_unique_abbr": "UW\u2013Madison;Cornell", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Madison;", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Robust Pure Exploration in Linear Bandits with Limited Budget", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9799", "id": "9799", "proceeding": "http://proceedings.mlr.press/v139/alieva21a.html", "slides": "", "author_site": "Ayya Alieva, Ashok Cutkosky, Abhimanyu Das", "author": "Ayya Alieva; Ashok Cutkosky; Abhimanyu Das", "abstract": "We consider the pure exploration problem in the fixed-budget linear bandit setting. We provide a new algorithm that identifies the best arm with high probability while being robust to unknown levels of observation noise as well as to moderate levels of misspecification in the linear model. 
Our technique combines prior approaches to pure exploration in the multi-armed bandit problem with optimal experimental design algorithms to obtain both problem dependent and problem independent bounds. Our success probability is never worse than that of an algorithm that ignores the linear structure, but seamlessly takes advantage of such structure when possible. Furthermore, we only need the number of samples to scale with the dimension of the problem rather than the number of arms. We complement our theoretical results with empirical validation.}\n}", "pdf": "http://proceedings.mlr.press/v139/alieva21a/alieva21a.pdf", "supp": "", "pdf_size": 475117, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14214679049805767528&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "Stanford University; Boston University; Google Research", "aff_domain": "stanford.edu;cutkosky.com;google.com", "email": "stanford.edu;cutkosky.com;google.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/alieva21a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Stanford University;Boston University;Google", "aff_unique_dep": ";;Google Research", "aff_unique_url": "https://www.stanford.edu;https://www.bu.edu;https://research.google", "aff_unique_abbr": "Stanford;BU;Google Research", "aff_campus_unique_index": "0;2", "aff_campus_unique": "Stanford;;Mountain View", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Robust Reinforcement Learning using Least Squares Policy Iteration with Provable Performance Guarantees", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8693", "id": "8693", "proceeding": "http://proceedings.mlr.press/v139/badrinath21a.html", "slides": "/media/icml-2021/Slides/8693.pdf", "author_site": "Kishan Panaganti, Dileep Kalathil", "author": "Kishan Panaganti Badrinath; Dileep Kalathil", "abstract": "This paper addresses the problem of model-free reinforcement learning for Robust Markov Decision Process (RMDP) with large state spaces. The goal of the RMDPs framework is to find a policy that is robust against the parameter uncertainties due to the mismatch between the simulator model and real-world settings. We first propose the Robust Least Squares Policy Evaluation algorithm, which is a multi-step online model-free learning algorithm for policy evaluation. We prove the convergence of this algorithm using stochastic approximation techniques. We then propose Robust Least Squares Policy Iteration (RLSPI) algorithm for learning the optimal robust policy. We also give a general weighted Euclidean norm bound on the error (closeness to optimality) of the resulting policy. 
Finally, we demonstrate the performance of our RLSPI algorithm on some benchmark problems from OpenAI Gym.", "bibtex": "@InProceedings{pmlr-v139-badrinath21a,\n title = \t {Robust Reinforcement Learning using Least Squares Policy Iteration with Provable Performance Guarantees},\n author = {Badrinath, Kishan Panaganti and Kalathil, Dileep},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {511--520},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/badrinath21a/badrinath21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/badrinath21a.html},\n abstract = \t {This paper addresses the problem of model-free reinforcement learning for Robust Markov Decision Process (RMDP) with large state spaces. The goal of the RMDPs framework is to find a policy that is robust against the parameter uncertainties due to the mismatch between the simulator model and real-world settings. We first propose the Robust Least Squares Policy Evaluation algorithm, which is a multi-step online model-free learning algorithm for policy evaluation. We prove the convergence of this algorithm using stochastic approximation techniques. We then propose Robust Least Squares Policy Iteration (RLSPI) algorithm for learning the optimal robust policy. We also give a general weighted Euclidean norm bound on the error (closeness to optimality) of the resulting policy. Finally, we demonstrate the performance of our RLSPI algorithm on some benchmark problems from OpenAI Gym.}\n}", "pdf": "http://proceedings.mlr.press/v139/badrinath21a/badrinath21a.pdf", "supp": "", "pdf_size": 2670188, "gs_citation": 87, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17433775890055229852&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Electrical and Computer Engineering, Texas A&M University, College Station, United States; Department of Electrical and Computer Engineering, Texas A&M University, College Station, United States", "aff_domain": "tamu.edu;tamu.edu", "email": "tamu.edu;tamu.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/badrinath21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Texas A&M University", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.tamu.edu", "aff_unique_abbr": "TAMU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "College Station", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Robust Representation Learning via Perceptual Similarity Metrics", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10357", "id": "10357", "proceeding": "http://proceedings.mlr.press/v139/taghanaki21a.html", "slides": "/media/icml-2021/Slides/10357.pdf", "author_site": "Saeid A Taghanaki, Kristy Choi, Amir Hosein Khasahmadi, Anirudh Goyal", "author": "Saeid A Taghanaki; Kristy Choi; Amir Hosein Khasahmadi; Anirudh Goyal", "abstract": "A fundamental challenge in artificial intelligence is learning useful representations of data that yield good performance on a downstream classification task, without overfitting to spurious input features. Extracting such task-relevant predictive information becomes particularly difficult for noisy and high-dimensional real-world data. 
In this work, we propose Contrastive Input Morphing (CIM), a representation learning framework that learns input-space transformations of the data to mitigate the effect of irrelevant input features on downstream performance. Our method leverages a perceptual similarity metric via a triplet loss to ensure that the transformation preserves task-relevant information. Empirically, we demonstrate the efficacy of our approach on various tasks which typically suffer from the presence of spurious correlations: classification with nuisance information, out-of-distribution generalization, and preservation of subgroup accuracies. We additionally show that CIM is complementary to other mutual information-based representation learning techniques, and demonstrate that it improves the performance of variational information bottleneck (VIB) when used in conjunction.", "bibtex": "@InProceedings{pmlr-v139-taghanaki21a,\n title = \t {Robust Representation Learning via Perceptual Similarity Metrics},\n author = {Taghanaki, Saeid A and Choi, Kristy and Khasahmadi, Amir Hosein and Goyal, Anirudh},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10043--10053},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/taghanaki21a/taghanaki21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/taghanaki21a.html},\n abstract = \t {A fundamental challenge in artificial intelligence is learning useful representations of data that yield good performance on a downstream classification task, without overfitting to spurious input features. Extracting such task-relevant predictive information becomes particularly difficult for noisy and high-dimensional real-world data. In this work, we propose Contrastive Input Morphing (CIM), a representation learning framework that learns input-space transformations of the data to mitigate the effect of irrelevant input features on downstream performance. Our method leverages a perceptual similarity metric via a triplet loss to ensure that the transformation preserves task-relevant information. Empirically, we demonstrate the efficacy of our approach on various tasks which typically suffer from the presence of spurious correlations: classification with nuisance information, out-of-distribution generalization, and preservation of subgroup accuracies. 
We additionally show that CIM is complementary to other mutual information-based representation learning techniques, and demonstrate that it improves the performance of variational information bottleneck (VIB) when used in conjunction.}\n}", "pdf": "http://proceedings.mlr.press/v139/taghanaki21a/taghanaki21a.pdf", "supp": "", "pdf_size": 6845470, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15999739069428348800&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Autodesk AI Lab; Computer Science, Stanford University; Autodesk AI Lab; Mila, Universit\u00e9 de Montr\u00e9al", "aff_domain": "autodesk.com; ; ; ", "email": "autodesk.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/taghanaki21a.html", "aff_unique_index": "0;1;0;2", "aff_unique_norm": "Autodesk;Stanford University;Universit\u00e9 de Montr\u00e9al", "aff_unique_dep": "Autodesk AI Lab;Computer Science;Mila", "aff_unique_url": "https://www.autodesk.com;https://www.stanford.edu;https://umontreal.ca", "aff_unique_abbr": "Autodesk;Stanford;UdeM", "aff_campus_unique_index": "1;2", "aff_campus_unique": ";Stanford;Montr\u00e9al", "aff_country_unique_index": "0;0;0;1", "aff_country_unique": "United States;Canada" }, { "title": "Robust Testing and Estimation under Manipulation Attacks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9361", "id": "9361", "proceeding": "http://proceedings.mlr.press/v139/acharya21a.html", "slides": "", "author_site": "Jayadev Acharya, Ziteng Sun, Huanyu Zhang", "author": "Jayadev Acharya; Ziteng Sun; Huanyu Zhang", "abstract": "We study robust testing and estimation of discrete distributions in the strong contamination model. Our results cover both centralized setting and distributed setting with general local information constraints including communication and LDP constraints. Our technique relates the strength of manipulation attacks to the earth-mover distance using Hamming distance as the metric between messages (samples) from the users. In the centralized setting, we provide optimal error bounds for both learning and testing. Our lower bounds under local information constraints build on the recent lower bound methods in distributed inference. In the communication constrained setting, we develop novel algorithms based on random hashing and an L1-L1 isometry.", "bibtex": "@InProceedings{pmlr-v139-acharya21a,\n title = \t {Robust Testing and Estimation under Manipulation Attacks},\n author = {Acharya, Jayadev and Sun, Ziteng and Zhang, Huanyu},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {43--53},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/acharya21a/acharya21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/acharya21a.html},\n abstract = \t {We study robust testing and estimation of discrete distributions in the strong contamination model. Our results cover both centralized setting and distributed setting with general local information constraints including communication and LDP constraints. Our technique relates the strength of manipulation attacks to the earth-mover distance using Hamming distance as the metric between messages (samples) from the users. 
In the centralized setting, we provide optimal error bounds for both learning and testing. Our lower bounds under local information constraints build on the recent lower bound methods in distributed inference. In the communication constrained setting, we develop novel algorithms based on random hashing and an L1-L1 isometry.}\n}", "pdf": "http://proceedings.mlr.press/v139/acharya21a/acharya21a.pdf", "supp": "", "pdf_size": 362222, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15668235504290944123&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Electrical and Computer Engineering, Cornell University; Electrical and Computer Engineering, Cornell University; Electrical and Computer Engineering, Cornell University", "aff_domain": "cornell.edu;cornell.edu;cornell.edu", "email": "cornell.edu;cornell.edu;cornell.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/acharya21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Cornell University", "aff_unique_dep": "Electrical and Computer Engineering", "aff_unique_url": "https://www.cornell.edu", "aff_unique_abbr": "Cornell", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Robust Unsupervised Learning via L-statistic Minimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9291", "id": "9291", "proceeding": "http://proceedings.mlr.press/v139/maurer21a.html", "slides": "", "author_site": "Andreas Maurer, Daniela Angela Parletta, Andrea Paudice, Massimiliano Pontil", "author": "Andreas Maurer; Daniela Angela Parletta; Andrea Paudice; Massimiliano Pontil", "abstract": "Designing learning algorithms that are resistant to perturbations of the underlying data distribution is a problem of wide practical and theoretical importance. We present a general approach to this problem focusing on unsupervised learning. The key assumption is that the perturbing distribution is characterized by larger losses relative to a given class of admissible models. This is exploited by a general descent algorithm which minimizes an $L$-statistic criterion over the model class, weighting small losses more. Our analysis characterizes the robustness of the method in terms of bounds on the reconstruction error relative to the underlying unperturbed distribution. As a byproduct, we prove uniform convergence bounds with respect to the proposed criterion for several popular models in unsupervised learning, a result which may be of independent interest. 
Numerical experiments with \\textsc{kmeans} clustering and principal subspace analysis demonstrate the effectiveness of our approach.", "bibtex": "@InProceedings{pmlr-v139-maurer21a,\n title = \t {Robust Unsupervised Learning via L-statistic Minimization},\n author = {Maurer, Andreas and Parletta, Daniela Angela and Paudice, Andrea and Pontil, Massimiliano},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7524--7533},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/maurer21a/maurer21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/maurer21a.html},\n abstract = \t {Designing learning algorithms that are resistant to perturbations of the underlying data distribution is a problem of wide practical and theoretical importance. We present a general approach to this problem focusing on unsupervised learning. The key assumption is that the perturbing distribution is characterized by larger losses relative to a given class of admissible models. This is exploited by a general descent algorithm which minimizes an $L$-statistic criterion over the model class, weighting small losses more. Our analysis characterizes the robustness of the method in terms of bounds on the reconstruction error relative to the underlying unperturbed distribution. As a byproduct, we prove uniform convergence bounds with respect to the proposed criterion for several popular models in unsupervised learning, a result which may be of independent interest. Numerical experiments with \\textsc{kmeans} clustering and principal subspace analysis demonstrate the effectiveness of our approach.}\n}", "pdf": "http://proceedings.mlr.press/v139/maurer21a/maurer21a.pdf", "supp": "", "pdf_size": 1026393, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17994117657604926977&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Istituto Italiano di Tecnologia, Genoa, Italy; Istituto Italiano di Tecnologia, Genoa, Italy+University of Genoa, Genoa, Italy; Istituto Italiano di Tecnologia, Genoa, Italy+University of Milan, Milan, Italy; Istituto Italiano di Tecnologia, Genoa, Italy+University College London, London, UK", "aff_domain": "gmail.com; ; ; ", "email": "gmail.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/maurer21a.html", "aff_unique_index": "0;0+1;0+2;0+3", "aff_unique_norm": "Istituto Italiano di Tecnologia;University of Genoa;University of Milan;University College London", "aff_unique_dep": ";;;", "aff_unique_url": "https://www.iit.it;https://www.unige.it;https://www.unimi.it;https://www.ucl.ac.uk", "aff_unique_abbr": "IIT;UniGe;UniMi;UCL", "aff_campus_unique_index": "0;0+0;0+1;0+2", "aff_campus_unique": "Genoa;Milan;London", "aff_country_unique_index": "0;0+0;0+0;0+1", "aff_country_unique": "Italy;United Kingdom" }, { "title": "Run-Sort-ReRun: Escaping Batch Size Limitations in Sliced Wasserstein Generative Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10569", "id": "10569", "proceeding": "http://proceedings.mlr.press/v139/lezama21a.html", "slides": "", "author_site": "Jos\u00e9 Lezama, Wei Chen, Qiang Qiu", "author": "Jose Lezama; Wei Chen; Qiang Qiu", "abstract": "When training an implicit generative model, ideally one would like the generator 
to reproduce all the different modes and subtleties of the target distribution. Naturally, when comparing two empirical distributions, the larger the sample population, the more these statistical nuances can be captured. However, existing objective functions are computationally constrained in the amount of samples they can consider by the memory required to process a batch of samples. In this paper, we build upon recent progress in sliced Wasserstein distances, a family of differentiable metrics for distribution discrepancy based on the Optimal Transport paradigm. We introduce a procedure to train these distances with virtually any batch size, allowing the discrepancy measure to capture richer statistics and better approximating the distance between the underlying continuous distributions. As an example, we demonstrate the matching of the distribution of Inception features with batches of tens of thousands of samples, achieving FID scores that outperform state-of-the-art implicit generative models.", "bibtex": "@InProceedings{pmlr-v139-lezama21a,\n title = \t {Run-Sort-ReRun: Escaping Batch Size Limitations in Sliced Wasserstein Generative Models},\n author = {Lezama, Jose and Chen, Wei and Qiu, Qiang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6275--6285},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lezama21a/lezama21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lezama21a.html},\n abstract = \t {When training an implicit generative model, ideally one would like the generator to reproduce all the different modes and subtleties of the target distribution. Naturally, when comparing two empirical distributions, the larger the sample population, the more these statistical nuances can be captured. However, existing objective functions are computationally constrained in the amount of samples they can consider by the memory required to process a batch of samples. In this paper, we build upon recent progress in sliced Wasserstein distances, a family of differentiable metrics for distribution discrepancy based on the Optimal Transport paradigm. We introduce a procedure to train these distances with virtually any batch size, allowing the discrepancy measure to capture richer statistics and better approximating the distance between the underlying continuous distributions. 
As an example, we demonstrate the matching of the distribution of Inception features with batches of tens of thousands of samples, achieving FID scores that outperform state-of-the-art implicit generative models.}\n}", "pdf": "http://proceedings.mlr.press/v139/lezama21a/lezama21a.pdf", "supp": "", "pdf_size": 8619365, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4286720946120919949&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "IIE, Universidad de la Rep\u00fablica, Montevideo, Uruguay; ECE, Purdue University, West Lafayette, USA; ECE, Purdue University, West Lafayette, USA", "aff_domain": "fing.edu.uy; ; ", "email": "fing.edu.uy; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/lezama21a.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "Universidad de la Rep\u00fablica;Purdue University", "aff_unique_dep": "Instituto de Ingenier\u00eda El\u00e9ctrica;ECE", "aff_unique_url": "https://www.universidad.edu.uy;https://www.purdue.edu", "aff_unique_abbr": "Udelar;Purdue", "aff_campus_unique_index": "0;1;1", "aff_campus_unique": "Montevideo;West Lafayette", "aff_country_unique_index": "0;1;1", "aff_country_unique": "Uruguay;United States" }, { "title": "SAINT-ACC: Safety-Aware Intelligent Adaptive Cruise Control for Autonomous Vehicles Using Deep Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8685", "id": "8685", "proceeding": "http://proceedings.mlr.press/v139/das21a.html", "slides": "", "author_site": "Lokesh Chandra Das, Myounggyu Won", "author": "Lokesh Chandra Das; Myounggyu Won", "abstract": "We present a novel adaptive cruise control (ACC) system namely SAINT-ACC: {S}afety-{A}ware {Int}elligent {ACC} system (SAINT-ACC) that is designed to achieve simultaneous optimization of traffic efficiency, driving safety, and driving comfort through dynamic adaptation of the inter-vehicle gap based on deep reinforcement learning (RL). A novel dual RL agent-based approach is developed to seek and adapt the optimal balance between traffic efficiency and driving safety/comfort by effectively controlling the driving safety model parameters and inter-vehicle gap based on macroscopic and microscopic traffic information collected from dynamically changing and complex traffic environments. 
Results obtained through over 12,000 simulation runs with varying traffic scenarios and penetration rates demonstrate that SAINT-ACC significantly enhances traffic flow, driving safety and comfort compared with a state-of-the-art approach.", "bibtex": "@InProceedings{pmlr-v139-das21a,\n title = \t {SAINT-ACC: Safety-Aware Intelligent Adaptive Cruise Control for Autonomous Vehicles Using Deep Reinforcement Learning},\n author = {Das, Lokesh Chandra and Won, Myounggyu},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2445--2455},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/das21a/das21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/das21a.html},\n abstract = \t {We present a novel adaptive cruise control (ACC) system namely SAINT-ACC: {S}afety-{A}ware {Int}elligent {ACC} system (SAINT-ACC) that is designed to achieve simultaneous optimization of traffic efficiency, driving safety, and driving comfort through dynamic adaptation of the inter-vehicle gap based on deep reinforcement learning (RL). A novel dual RL agent-based approach is developed to seek and adapt the optimal balance between traffic efficiency and driving safety/comfort by effectively controlling the driving safety model parameters and inter-vehicle gap based on macroscopic and microscopic traffic information collected from dynamically changing and complex traffic environments. Results obtained through over 12,000 simulation runs with varying traffic scenarios and penetration rates demonstrate that SAINT-ACC significantly enhances traffic flow, driving safety and comfort compared with a state-of-the-art approach.}\n}", "pdf": "http://proceedings.mlr.press/v139/das21a/das21a.pdf", "supp": "", "pdf_size": 1001822, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12219745838056978802&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, University of Memphis, TN, United States; Department of Computer Science, University of Memphis, TN, United States", "aff_domain": "memphis.edu;memphis.edu", "email": "memphis.edu;memphis.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/das21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Memphis", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.memphis.edu", "aff_unique_abbr": "UM", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Memphis", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "SCC: an efficient deep reinforcement learning agent mastering the game of StarCraft II", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9579", "id": "9579", "proceeding": "http://proceedings.mlr.press/v139/wang21v.html", "slides": "", "author_site": "Xiangjun Wang, Junxiao SONG, Penghui Qi, Peng Peng, Zhenkun Tang, Wei Zhang, Weimin Li, Xiongjun Pi, Jujie He, Chao Gao, Haitao Long, Quan Yuan", "author": "Xiangjun Wang; Junxiao Song; Penghui Qi; Peng Peng; Zhenkun Tang; Wei Zhang; Weimin Li; Xiongjun Pi; Jujie He; Chao Gao; Haitao Long; Quan Yuan", "abstract": "AlphaStar, the AI that reaches GrandMaster level in StarCraft II, is a remarkable milestone demonstrating what deep reinforcement learning can 
achieve in complex Real-Time Strategy (RTS) games. However, the complexities of the game, algorithms and systems, and especially the tremendous amount of computation needed are big obstacles for the community to conduct further research in this direction. We propose a deep reinforcement learning agent, StarCraft Commander (SCC). With order of magnitude less computation, it demonstrates top human performance defeating GrandMaster players in test matches and top professional players in a live event. Moreover, it shows strong robustness to various human strategies and discovers novel strategies unseen from human plays. In this paper, we\u2019ll share the key insights and optimizations on efficient imitation learning and reinforcement learning for StarCraft II full game.", "bibtex": "@InProceedings{pmlr-v139-wang21v,\n title = \t {SCC: an efficient deep reinforcement learning agent mastering the game of StarCraft II},\n author = {Wang, Xiangjun and Song, Junxiao and Qi, Penghui and Peng, Peng and Tang, Zhenkun and Zhang, Wei and Li, Weimin and Pi, Xiongjun and He, Jujie and Gao, Chao and Long, Haitao and Yuan, Quan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10905--10915},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21v/wang21v.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21v.html},\n abstract = \t {AlphaStar, the AI that reaches GrandMaster level in StarCraft II, is a remarkable milestone demonstrating what deep reinforcement learning can achieve in complex Real-Time Strategy (RTS) games. However, the complexities of the game, algorithms and systems, and especially the tremendous amount of computation needed are big obstacles for the community to conduct further research in this direction. We propose a deep reinforcement learning agent, StarCraft Commander (SCC). With order of magnitude less computation, it demonstrates top human performance defeating GrandMaster players in test matches and top professional players in a live event. Moreover, it shows strong robustness to various human strategies and discovers novel strategies unseen from human plays. 
In this paper, we\u2019ll share the key insights and optimizations on efficient imitation learning and reinforcement learning for StarCraft II full game.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21v/wang21v.pdf", "supp": "", "pdf_size": 1368664, "gs_citation": 56, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10562717347382828087&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "inspir.ai, Beijing, China; inspir.ai, Beijing, China; inspir.ai, Beijing, China; inspir.ai, Beijing, China; inspir.ai, Beijing, China; inspir.ai, Beijing, China; inspir.ai, Beijing, China; inspir.ai, Beijing, China; inspir.ai, Beijing, China; inspir.ai, Beijing, China; inspir.ai, Beijing, China; inspir.ai, Beijing, China", "aff_domain": "inspirai.com; ; ; ; ; ; ; ; ; ; ;", "email": "inspirai.com; ; ; ; ; ; ; ; ; ; ;", "github": "", "project": "", "author_num": 12, "oa": "https://proceedings.mlr.press/v139/wang21v.html", "aff_unique_index": "0;0;0;0;0;0;0;0;0;0;0;0", "aff_unique_norm": "inspir.ai", "aff_unique_dep": "", "aff_unique_url": "", "aff_unique_abbr": "", "aff_campus_unique_index": "0;0;0;0;0;0;0;0;0;0;0;0", "aff_campus_unique": "Beijing", "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;0;0", "aff_country_unique": "China" }, { "title": "SECANT: Self-Expert Cloning for Zero-Shot Generalization of Visual Policies", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9317", "id": "9317", "proceeding": "http://proceedings.mlr.press/v139/fan21c.html", "slides": "", "author_site": "Jim Fan, Guanzhi Wang, De-An Huang, Zhiding Yu, Li Fei-Fei, Yuke Zhu, Anima Anandkumar", "author": "Linxi Fan; Guanzhi Wang; De-An Huang; Zhiding Yu; Li Fei-Fei; Yuke Zhu; Animashree Anandkumar", "abstract": "Generalization has been a long-standing challenge for reinforcement learning (RL). Visual RL, in particular, can be easily distracted by irrelevant factors in high-dimensional observation space. In this work, we consider robust policy learning which targets zero-shot generalization to unseen visual environments with large distributional shift. We propose SECANT, a novel self-expert cloning technique that leverages image augmentation in two stages to *decouple* robust representation learning from policy optimization. Specifically, an expert policy is first trained by RL from scratch with weak augmentations. A student network then learns to mimic the expert policy by supervised learning with strong augmentations, making its representation more robust against visual variations compared to the expert. Extensive experiments demonstrate that SECANT significantly advances the state of the art in zero-shot generalization across 4 challenging domains. Our average reward improvements over prior SOTAs are: DeepMind Control (+26.5%), robotic manipulation (+337.8%), vision-based autonomous driving (+47.7%), and indoor object navigation (+15.8%). 
Code release and video are available at https://linxifan.github.io/secant-site/.", "bibtex": "@InProceedings{pmlr-v139-fan21c,\n title = \t {SECANT: Self-Expert Cloning for Zero-Shot Generalization of Visual Policies},\n author = {Fan, Linxi and Wang, Guanzhi and Huang, De-An and Yu, Zhiding and Fei-Fei, Li and Zhu, Yuke and Anandkumar, Animashree},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3088--3099},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fan21c/fan21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/fan21c.html},\n abstract = \t {Generalization has been a long-standing challenge for reinforcement learning (RL). Visual RL, in particular, can be easily distracted by irrelevant factors in high-dimensional observation space. In this work, we consider robust policy learning which targets zero-shot generalization to unseen visual environments with large distributional shift. We propose SECANT, a novel self-expert cloning technique that leverages image augmentation in two stages to *decouple* robust representation learning from policy optimization. Specifically, an expert policy is first trained by RL from scratch with weak augmentations. A student network then learns to mimic the expert policy by supervised learning with strong augmentations, making its representation more robust against visual variations compared to the expert. Extensive experiments demonstrate that SECANT significantly advances the state of the art in zero-shot generalization across 4 challenging domains. Our average reward improvements over prior SOTAs are: DeepMind Control (+26.5%), robotic manipulation (+337.8%), vision-based autonomous driving (+47.7%), and indoor object navigation (+15.8%). 
Code release and video are available at https://linxifan.github.io/secant-site/.}\n}", "pdf": "http://proceedings.mlr.press/v139/fan21c/fan21c.pdf", "supp": "", "pdf_size": 9084526, "gs_citation": 76, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16889342839830358284&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Stanford University, CA, USA+ NVIDIA, CA, USA; Stanford University, CA, USA+ NVIDIA, CA, USA; NVIDIA, CA, USA; NVIDIA, CA, USA; Stanford University, CA, USA; The University of Texas at Austin, TX, USA+ NVIDIA, CA, USA; California Institute of Technology, CA, USA+ NVIDIA, CA, USA", "aff_domain": "cs.stanford.edu; ; ; ; ; ; ", "email": "cs.stanford.edu; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/fan21c.html", "aff_unique_index": "0+1;0+1;1;1;0;2+1;3+1", "aff_unique_norm": "Stanford University;NVIDIA;University of Texas at Austin;California Institute of Technology", "aff_unique_dep": ";NVIDIA;;", "aff_unique_url": "https://www.stanford.edu;https://www.nvidia.com;https://www.utexas.edu;https://www.caltech.edu", "aff_unique_abbr": "Stanford;NV;UT Austin;Caltech", "aff_campus_unique_index": "0+1;0+1;1;1;0;2+1;3+1", "aff_campus_unique": "California;Santa Clara;Austin;Pasadena", "aff_country_unique_index": "0+0;0+0;0;0;0;0+0;0+0", "aff_country_unique": "United States" }, { "title": "SG-PALM: a Fast Physically Interpretable Tensor Graphical Model", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9797", "id": "9797", "proceeding": "http://proceedings.mlr.press/v139/wang21k.html", "slides": "/media/icml-2021/Slides/9797.pdf", "author_site": "Yu Wang, Alfred Hero", "author": "Yu Wang; Alfred Hero", "abstract": "We propose a new graphical model inference procedure, called SG-PALM, for learning conditional dependency structure of high-dimensional tensor-variate data. Unlike most other tensor graphical models the proposed model is interpretable and computationally scalable to high dimension. Physical interpretability follows from the Sylvester generative (SG) model on which SG-PALM is based: the model is exact for any observation process that is a solution of a partial differential equation of Poisson type. Scalability follows from the fast proximal alternating linearized minimization (PALM) procedure that SG-PALM uses during training. We establish that SG-PALM converges linearly (i.e., geometric convergence rate) to a global optimum of its objective function. We demonstrate scalability and accuracy of SG-PALM for an important but challenging climate prediction problem: spatio-temporal forecasting of solar flares from multimodal imaging data.", "bibtex": "@InProceedings{pmlr-v139-wang21k,\n title = \t {SG-PALM: a Fast Physically Interpretable Tensor Graphical Model},\n author = {Wang, Yu and Hero, Alfred},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10783--10793},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21k/wang21k.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21k.html},\n abstract = \t {We propose a new graphical model inference procedure, called SG-PALM, for learning conditional dependency structure of high-dimensional tensor-variate data. 
Unlike most other tensor graphical models the proposed model is interpretable and computationally scalable to high dimension. Physical interpretability follows from the Sylvester generative (SG) model on which SG-PALM is based: the model is exact for any observation process that is a solution of a partial differential equation of Poisson type. Scalability follows from the fast proximal alternating linearized minimization (PALM) procedure that SG-PALM uses during training. We establish that SG-PALM converges linearly (i.e., geometric convergence rate) to a global optimum of its objective function. We demonstrate scalability and accuracy of SG-PALM for an important but challenging climate prediction problem: spatio-temporal forecasting of solar flares from multimodal imaging data.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21k/wang21k.pdf", "supp": "", "pdf_size": 991459, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15846965999647833426&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "University of Michigan, Ann Arbor, Michigan, USA; University of Michigan, Ann Arbor, Michigan, USA", "aff_domain": "umich.edu; ", "email": "umich.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/wang21k.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Michigan", "aff_unique_dep": "", "aff_unique_url": "https://www.umich.edu", "aff_unique_abbr": "UM", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Ann Arbor", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "SGA: A Robust Algorithm for Partial Recovery of Tree-Structured Graphical Models with Noisy Samples", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8737", "id": "8737", "proceeding": "http://proceedings.mlr.press/v139/tandon21a.html", "slides": "", "author_site": "Anshoo Tandon, Aldric Han, Vincent Tan", "author": "Anshoo Tandon; Aldric Han; Vincent Tan", "abstract": "We consider learning Ising tree models when the observations from the nodes are corrupted by independent but non-identically distributed noise with unknown statistics. Katiyar et al. (2020) showed that although the exact tree structure cannot be recovered, one can recover a partial tree structure; that is, a structure belonging to the equivalence class containing the true tree. This paper presents a systematic improvement of Katiyar et al. (2020). First, we present a novel impossibility result by deriving a bound on the necessary number of samples for partial recovery. Second, we derive a significantly improved sample complexity result in which the dependence on the minimum correlation $\\rho_{\\min}$ is $\\rho_{\\min}^{-8}$ instead of $\\rho_{\\min}^{-24}$. Finally, we propose Symmetrized Geometric Averaging (SGA), a more statistically robust algorithm for partial tree recovery. We provide error exponent analyses and extensive numerical results on a variety of trees to show that the sample complexity of SGA is significantly better than the algorithm of Katiyar et al. (2020). 
SGA can be readily extended to Gaussian models and is shown via numerical experiments to be similarly superior.", "bibtex": "@InProceedings{pmlr-v139-tandon21a,\n title = \t {SGA: A Robust Algorithm for Partial Recovery of Tree-Structured Graphical Models with Noisy Samples},\n author = {Tandon, Anshoo and Han, Aldric and Tan, Vincent},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10107--10117},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tandon21a/tandon21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/tandon21a.html},\n abstract = \t {We consider learning Ising tree models when the observations from the nodes are corrupted by independent but non-identically distributed noise with unknown statistics. Katiyar et al. (2020) showed that although the exact tree structure cannot be recovered, one can recover a partial tree structure; that is, a structure belonging to the equivalence class containing the true tree. This paper presents a systematic improvement of Katiyar et al. (2020). First, we present a novel impossibility result by deriving a bound on the necessary number of samples for partial recovery. Second, we derive a significantly improved sample complexity result in which the dependence on the minimum correlation $\\rho_{\\min}$ is $\\rho_{\\min}^{-8}$ instead of $\\rho_{\\min}^{-24}$. Finally, we propose Symmetrized Geometric Averaging (SGA), a more statistically robust algorithm for partial tree recovery. We provide error exponent analyses and extensive numerical results on a variety of trees to show that the sample complexity of SGA is significantly better than the algorithm of Katiyar et al. (2020). 
SGA can be readily extended to Gaussian models and is shown via numerical experiments to be similarly superior.}\n}", "pdf": "http://proceedings.mlr.press/v139/tandon21a/tandon21a.pdf", "supp": "", "pdf_size": 727780, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5307051521906779100&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Electrical & Computer Engineering, National University of Singapore, Singapore; Department of Mathematics, National University of Singapore, Singapore; Department of Electrical & Computer Engineering, National University of Singapore, Singapore+Department of Mathematics, National University of Singapore, Singapore", "aff_domain": "gmail.com; ;nus.edu.sg", "email": "gmail.com; ;nus.edu.sg", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/tandon21a.html", "aff_unique_index": "0;0;0+0", "aff_unique_norm": "National University of Singapore", "aff_unique_dep": "Department of Electrical & Computer Engineering", "aff_unique_url": "https://www.nus.edu.sg", "aff_unique_abbr": "NUS", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0+0", "aff_country_unique": "Singapore" }, { "title": "SGLB: Stochastic Gradient Langevin Boosting", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8665", "id": "8665", "proceeding": "http://proceedings.mlr.press/v139/ustimenko21a.html", "slides": "", "author_site": "Aleksei Ustimenko, Liudmila Prokhorenkova", "author": "Aleksei Ustimenko; Liudmila Prokhorenkova", "abstract": "This paper introduces Stochastic Gradient Langevin Boosting (SGLB) - a powerful and efficient machine learning framework that may deal with a wide range of loss functions and has provable generalization guarantees. The method is based on a special form of the Langevin diffusion equation specifically designed for gradient boosting. This allows us to theoretically guarantee the global convergence even for multimodal loss functions, while standard gradient boosting algorithms can guarantee only local optimum. We also empirically show that SGLB outperforms classic gradient boosting when applied to classification tasks with 0-1 loss function, which is known to be multimodal.", "bibtex": "@InProceedings{pmlr-v139-ustimenko21a,\n title = \t {SGLB: Stochastic Gradient Langevin Boosting},\n author = {Ustimenko, Aleksei and Prokhorenkova, Liudmila},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10487--10496},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ustimenko21a/ustimenko21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ustimenko21a.html},\n abstract = \t {This paper introduces Stochastic Gradient Langevin Boosting (SGLB) - a powerful and efficient machine learning framework that may deal with a wide range of loss functions and has provable generalization guarantees. The method is based on a special form of the Langevin diffusion equation specifically designed for gradient boosting. This allows us to theoretically guarantee the global convergence even for multimodal loss functions, while standard gradient boosting algorithms can guarantee only local optimum. 
We also empirically show that SGLB outperforms classic gradient boosting when applied to classification tasks with 0-1 loss function, which is known to be multimodal.}\n}", "pdf": "http://proceedings.mlr.press/v139/ustimenko21a/ustimenko21a.pdf", "supp": "", "pdf_size": 352124, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4086540316568811954&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Yandex, Moscow, Russia+Moscow Institute of Physics and Technology, Moscow, Russia+HSE University, Moscow, Russia; Yandex, Moscow, Russia+Moscow Institute of Physics and Technology, Moscow, Russia+HSE University, Moscow, Russia", "aff_domain": "yandex-team.ru; ", "email": "yandex-team.ru; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/ustimenko21a.html", "aff_unique_index": "0+1+2;0+1+2", "aff_unique_norm": "Yandex;Moscow Institute of Physics and Technology;HSE University", "aff_unique_dep": ";;", "aff_unique_url": "https://yandex.com;https://www.mipt.ru/en;https://hse.ru", "aff_unique_abbr": "Yandex;MIPT;HSE", "aff_campus_unique_index": "0+0+0;0+0+0", "aff_campus_unique": "Moscow", "aff_country_unique_index": "0+0+0;0+0+0", "aff_country_unique": "Russian Federation" }, { "title": "SKIing on Simplices: Kernel Interpolation on the Permutohedral Lattice for Scalable Gaussian Processes", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9749", "id": "9749", "proceeding": "http://proceedings.mlr.press/v139/kapoor21a.html", "slides": "/media/icml-2021/Slides/9749.pdf", "author_site": "Sanyam Kapoor, Marc Finzi, Ke Alexander Wang, Andrew Wilson", "author": "Sanyam Kapoor; Marc Finzi; Ke Alexander Wang; Andrew Gordon Gordon Wilson", "abstract": "State-of-the-art methods for scalable Gaussian processes use iterative algorithms, requiring fast matrix vector multiplies (MVMs) with the co-variance kernel. The Structured Kernel Interpolation (SKI) framework accelerates these MVMs by performing efficient MVMs on a grid and interpolating back to the original space. In this work, we develop a connection between SKI and the permutohedral lattice used for high-dimensional fast bilateral filtering. Using a sparse simplicial grid instead of a dense rectangular one, we can perform GP inference exponentially faster in the dimension than SKI. Our approach, Simplex-GP, enables scaling SKI to high dimensions, while maintaining strong predictive performance. We additionally provide a CUDA implementation of Simplex-GP, which enables significant GPU acceleration of MVM based inference.", "bibtex": "@InProceedings{pmlr-v139-kapoor21a,\n title = \t {SKIing on Simplices: Kernel Interpolation on the Permutohedral Lattice for Scalable Gaussian Processes},\n author = {Kapoor, Sanyam and Finzi, Marc and Wang, Ke Alexander and Wilson, Andrew Gordon Gordon},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5279--5289},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kapoor21a/kapoor21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kapoor21a.html},\n abstract = \t {State-of-the-art methods for scalable Gaussian processes use iterative algorithms, requiring fast matrix vector multiplies (MVMs) with the co-variance kernel. 
The Structured Kernel Interpolation (SKI) framework accelerates these MVMs by performing efficient MVMs on a grid and interpolating back to the original space. In this work, we develop a connection between SKI and the permutohedral lattice used for high-dimensional fast bilateral filtering. Using a sparse simplicial grid instead of a dense rectangular one, we can perform GP inference exponentially faster in the dimension than SKI. Our approach, Simplex-GP, enables scaling SKI to high dimensions, while maintaining strong predictive performance. We additionally provide a CUDA implementation of Simplex-GP, which enables significant GPU acceleration of MVM based inference.}\n}", "pdf": "http://proceedings.mlr.press/v139/kapoor21a/kapoor21a.pdf", "supp": "", "pdf_size": 904361, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=612518699030619789&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "New York University, NY, USA; New York University, NY, USA; Stanford University, CA, USA; New York University, NY, USA", "aff_domain": "nyu.edu; ; ; ", "email": "nyu.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/kapoor21a.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "New York University;Stanford University", "aff_unique_dep": ";", "aff_unique_url": "https://www.nyu.edu;https://www.stanford.edu", "aff_unique_abbr": "NYU;Stanford", "aff_campus_unique_index": "0;0;1;0", "aff_campus_unique": "New York;California", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "SMG: A Shuffling Gradient-Based Method with Momentum", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8437", "id": "8437", "proceeding": "http://proceedings.mlr.press/v139/tran21b.html", "slides": "/media/icml-2021/Slides/8437.pdf", "author_site": "Trang Tran, Lam Nguyen, Quoc Tran-Dinh", "author": "Trang H Tran; Lam M Nguyen; Quoc Tran-Dinh", "abstract": "We combine two advanced ideas widely used in optimization for machine learning: \\textit{shuffling} strategy and \\textit{momentum} technique to develop a novel shuffling gradient-based method with momentum, coined \\textbf{S}huffling \\textbf{M}omentum \\textbf{G}radient (SMG), for non-convex finite-sum optimization problems. While our method is inspired by momentum techniques, its update is fundamentally different from existing momentum-based methods. We establish state-of-the-art convergence rates of SMG for any shuffling strategy using either constant or diminishing learning rate under standard assumptions (i.e. \\textit{$L$-smoothness} and \\textit{bounded variance}). When the shuffling strategy is fixed, we develop another new algorithm that is similar to existing momentum methods, and prove the same convergence rates for this algorithm under the $L$-smoothness and bounded gradient assumptions. We demonstrate our algorithms via numerical simulations on standard datasets and compare them with existing shuffling methods. 
Our tests have shown encouraging performance of the new algorithms.", "bibtex": "@InProceedings{pmlr-v139-tran21b,\n title = \t {SMG: A Shuffling Gradient-Based Method with Momentum},\n author = {Tran, Trang H and Nguyen, Lam M and Tran-Dinh, Quoc},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10379--10389},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tran21b/tran21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/tran21b.html},\n abstract = \t {We combine two advanced ideas widely used in optimization for machine learning: \\textit{shuffling} strategy and \\textit{momentum} technique to develop a novel shuffling gradient-based method with momentum, coined \\textbf{S}huffling \\textbf{M}omentum \\textbf{G}radient (SMG), for non-convex finite-sum optimization problems. While our method is inspired by momentum techniques, its update is fundamentally different from existing momentum-based methods. We establish state-of-the-art convergence rates of SMG for any shuffling strategy using either constant or diminishing learning rate under standard assumptions (i.e. \\textit{$L$-smoothness} and \\textit{bounded variance}). When the shuffling strategy is fixed, we develop another new algorithm that is similar to existing momentum methods, and prove the same convergence rates for this algorithm under the $L$-smoothness and bounded gradient assumptions. We demonstrate our algorithms via numerical simulations on standard datasets and compare them with existing shuffling methods. Our tests have shown encouraging performance of the new algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v139/tran21b/tran21b.pdf", "supp": "", "pdf_size": 793263, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6312118897852077708&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "School of Operations Research and Information Engineering, Cornell University, Ithaca, NY, USA; IBM Research, Thomas J. Watson Research Center, Yorktown Heights, NY, USA; Department of Statistics and Operations Research, The University of North Carolina at Chapel Hill, NC, USA", "aff_domain": "ibm.com; ;", "email": "ibm.com; ;", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/tran21b.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Cornell University;IBM;University of North Carolina at Chapel Hill", "aff_unique_dep": "School of Operations Research and Information Engineering;Thomas J. 
Watson Research Center;Department of Statistics and Operations Research", "aff_unique_url": "https://www.cornell.edu;https://www.ibm.com/research;https://www.unc.edu", "aff_unique_abbr": "Cornell;IBM;UNC Chapel Hill", "aff_campus_unique_index": "0;1;2", "aff_campus_unique": "Ithaca;Yorktown Heights;Chapel Hill", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "SPADE: A Spectral Method for Black-Box Adversarial Robustness Evaluation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8987", "id": "8987", "proceeding": "http://proceedings.mlr.press/v139/cheng21a.html", "slides": "", "author_site": "Wuxinlin Cheng, Chenhui Deng, Zhiqiang Zhao, Yaohui Cai, Zhiru Zhang, Zhuo Feng", "author": "Wuxinlin Cheng; Chenhui Deng; Zhiqiang Zhao; Yaohui Cai; Zhiru Zhang; Zhuo Feng", "abstract": "A black-box spectral method is introduced for evaluating the adversarial robustness of a given machine learning (ML) model. Our approach, named SPADE, exploits bijective distance mapping between the input/output graphs constructed for approximating the manifolds corresponding to the input/output data. By leveraging the generalized Courant-Fischer theorem, we propose a SPADE score for evaluating the adversarial robustness of a given model, which is proved to be an upper bound of the best Lipschitz constant under the manifold setting. To reveal the most non-robust data samples highly vulnerable to adversarial attacks, we develop a spectral graph embedding procedure leveraging dominant generalized eigenvectors. This embedding step allows assigning each data point a robustness score that can be further harnessed for more effective adversarial training of ML models. Our experiments show promising empirical results for neural networks trained with the MNIST and CIFAR-10 data sets.", "bibtex": "@InProceedings{pmlr-v139-cheng21a,\n title = \t {SPADE: A Spectral Method for Black-Box Adversarial Robustness Evaluation},\n author = {Cheng, Wuxinlin and Deng, Chenhui and Zhao, Zhiqiang and Cai, Yaohui and Zhang, Zhiru and Feng, Zhuo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1814--1824},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cheng21a/cheng21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/cheng21a.html},\n abstract = \t {A black-box spectral method is introduced for evaluating the adversarial robustness of a given machine learning (ML) model. Our approach, named SPADE, exploits bijective distance mapping between the input/output graphs constructed for approximating the manifolds corresponding to the input/output data. By leveraging the generalized Courant-Fischer theorem, we propose a SPADE score for evaluating the adversarial robustness of a given model, which is proved to be an upper bound of the best Lipschitz constant under the manifold setting. To reveal the most non-robust data samples highly vulnerable to adversarial attacks, we develop a spectral graph embedding procedure leveraging dominant generalized eigenvectors. This embedding step allows assigning each data point a robustness score that can be further harnessed for more effective adversarial training of ML models. 
Our experiments show promising empirical results for neural networks trained with the MNIST and CIFAR-10 data sets.}\n}", "pdf": "http://proceedings.mlr.press/v139/cheng21a/cheng21a.pdf", "supp": "", "pdf_size": 3157609, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=174985207826748384&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Stevens Institute of Technology; Cornell University; Stevens Institute of Technology; Cornell University; Cornell University; Stevens Institute of Technology", "aff_domain": "stevens.edu;cornell.edu; ; ; ; ", "email": "stevens.edu;cornell.edu; ; ; ; ", "github": "github.com/Feng-Research/SPADE", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/cheng21a.html", "aff_unique_index": "0;1;0;1;1;0", "aff_unique_norm": "Stevens Institute of Technology;Cornell University", "aff_unique_dep": ";", "aff_unique_url": "https://www.stevens.edu;https://www.cornell.edu", "aff_unique_abbr": "SIT;Cornell", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "id": "3f283be2ca", "title": "SPECTRE: defending against backdoor attacks using robust statistics", "site": "https://proceedings.mlr.press/v139/hayase21a.html", "author": "Jonathan Hayase; Weihao Kong; Raghav Somani; Sewoong Oh", "abstract": "Modern machine learning increasingly requires training on a large collection of data from multiple sources, not all of which can be trusted. A particularly frightening scenario is when a small fraction of corrupted data changes the behavior of the trained model when triggered by an attacker-specified watermark. Such a compromised model will be deployed unnoticed as the model is accurate otherwise. There has been promising attempts to use the intermediate representations of such a model to separate corrupted examples from clean ones. However, these methods require a significant fraction of the data to be corrupted, in order to have strong enough signal for detection. We propose a novel defense algorithm using robust covariance estimation to amplify the spectral signature of corrupted data. This defense is able to completely remove backdoors whenever the benchmark backdoor attacks are successful, even in regimes where previous methods have no hope for detecting poisoned examples.", "bibtex": "@InProceedings{pmlr-v139-hayase21a,\n title = \t {SPECTRE: defending against backdoor attacks using robust statistics},\n author = {Hayase, Jonathan and Kong, Weihao and Somani, Raghav and Oh, Sewoong},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4129--4139},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hayase21a/hayase21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hayase21a.html},\n abstract = \t {Modern machine learning increasingly requires training on a large collection of data from multiple sources, not all of which can be trusted. A particularly frightening scenario is when a small fraction of corrupted data changes the behavior of the trained model when triggered by an attacker-specified watermark. Such a compromised model will be deployed unnoticed as the model is accurate otherwise. 
There has been promising attempts to use the intermediate representations of such a model to separate corrupted examples from clean ones. However, these methods require a significant fraction of the data to be corrupted, in order to have strong enough signal for detection. We propose a novel defense algorithm using robust covariance estimation to amplify the spectral signature of corrupted data. This defense is able to completely remove backdoors whenever the benchmark backdoor attacks are successful, even in regimes where previous methods have no hope for detecting poisoned examples.}\n}", "pdf": "http://proceedings.mlr.press/v139/hayase21a/hayase21a.pdf", "supp": "", "pdf_size": 1852342, "gs_citation": 187, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17952878874994811152&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "https://github.com/SewoongLab/spectre-defense", "project": "", "author_num": 4 }, { "title": "STRODE: Stochastic Boundary Ordinary Differential Equation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8689", "id": "8689", "proceeding": "http://proceedings.mlr.press/v139/huang21d.html", "slides": "/media/icml-2021/Slides/8689_004K03l.pdf", "author_site": "Huang Hengguan, Hongfu Liu, Hao Wang, Chang Xiao, Ye Wang", "author": "Hengguan Huang; Hongfu Liu; Hao Wang; Chang Xiao; Ye Wang", "abstract": "Perception of time from sequentially acquired sensory inputs is rooted in everyday behaviors of individual organisms. Yet, most algorithms for time-series modeling fail to learn dynamics of random event timings directly from visual or audio inputs, requiring timing annotations during training that are usually unavailable for real-world applications. For instance, neuroscience perspectives on postdiction imply that there exist variable temporal ranges within which the incoming sensory inputs can affect the earlier perception, but such temporal ranges are mostly unannotated for real applications such as automatic speech recognition (ASR). In this paper, we present a probabilistic ordinary differential equation (ODE), called STochastic boundaRy ODE (STRODE), that learns both the timings and the dynamics of time series data without requiring any timing annotations during training. STRODE allows the usage of differential equations to sample from the posterior point processes, efficiently and analytically. We further provide theoretical guarantees on the learning of STRODE. Our empirical results show that our approach successfully infers event timings of time series data. 
Our method achieves competitive or superior performances compared to existing state-of-the-art methods for both synthetic and real-world datasets.", "bibtex": "@InProceedings{pmlr-v139-huang21d,\n title = \t {STRODE: Stochastic Boundary Ordinary Differential Equation},\n author = {Huang, Hengguan and Liu, Hongfu and Wang, Hao and Xiao, Chang and Wang, Ye},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4435--4445},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/huang21d/huang21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/huang21d.html},\n abstract = \t {Perception of time from sequentially acquired sensory inputs is rooted in everyday behaviors of individual organisms. Yet, most algorithms for time-series modeling fail to learn dynamics of random event timings directly from visual or audio inputs, requiring timing annotations during training that are usually unavailable for real-world applications. For instance, neuroscience perspectives on postdiction imply that there exist variable temporal ranges within which the incoming sensory inputs can affect the earlier perception, but such temporal ranges are mostly unannotated for real applications such as automatic speech recognition (ASR). In this paper, we present a probabilistic ordinary differential equation (ODE), called STochastic boundaRy ODE (STRODE), that learns both the timings and the dynamics of time series data without requiring any timing annotations during training. STRODE allows the usage of differential equations to sample from the posterior point processes, efficiently and analytically. We further provide theoretical guarantees on the learning of STRODE. Our empirical results show that our approach successfully infers event timings of time series data. 
Our method achieves competitive or superior performances compared to existing state-of-the-art methods for both synthetic and real-world datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/huang21d/huang21d.pdf", "supp": "", "pdf_size": 3022264, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3501265210663364162&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "National University of Singapore; National University of Singapore; Rutgers University; National University of Singapore; National University of Singapore", "aff_domain": "comp.nus.edu.sg; ; ; ;comp.nus.edu.sg", "email": "comp.nus.edu.sg; ; ; ;comp.nus.edu.sg", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/huang21d.html", "aff_unique_index": "0;0;1;0;0", "aff_unique_norm": "National University of Singapore;Rutgers University", "aff_unique_dep": ";", "aff_unique_url": "https://www.nus.edu.sg;https://www.rutgers.edu", "aff_unique_abbr": "NUS;Rutgers", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;1;0;0", "aff_country_unique": "Singapore;United States" }, { "title": "SUNRISE: A Simple Unified Framework for Ensemble Learning in Deep Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10641", "id": "10641", "proceeding": "http://proceedings.mlr.press/v139/lee21g.html", "slides": "/media/icml-2021/Slides/10641.pdf", "author_site": "Kimin Lee, Michael Laskin, Aravind Srinivas, Pieter Abbeel", "author": "Kimin Lee; Michael Laskin; Aravind Srinivas; Pieter Abbeel", "abstract": "Off-policy deep reinforcement learning (RL) has been successful in a range of challenging domains. However, standard off-policy RL algorithms can suffer from several issues, such as instability in Q-learning and balancing exploration and exploitation. To mitigate these issues, we present SUNRISE, a simple unified ensemble method, which is compatible with various off-policy RL algorithms. SUNRISE integrates two key ingredients: (a) ensemble-based weighted Bellman backups, which re-weight target Q-values based on uncertainty estimates from a Q-ensemble, and (b) an inference method that selects actions using the highest upper-confidence bounds for efficient exploration. By enforcing the diversity between agents using Bootstrap with random initialization, we show that these different ideas are largely orthogonal and can be fruitfully integrated, together further improving the performance of existing off-policy RL algorithms, such as Soft Actor-Critic and Rainbow DQN, for both continuous and discrete control tasks on both low-dimensional and high-dimensional environments.", "bibtex": "@InProceedings{pmlr-v139-lee21g,\n title = \t {SUNRISE: A Simple Unified Framework for Ensemble Learning in Deep Reinforcement Learning},\n author = {Lee, Kimin and Laskin, Michael and Srinivas, Aravind and Abbeel, Pieter},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6131--6141},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lee21g/lee21g.pdf},\n url = \t {https://proceedings.mlr.press/v139/lee21g.html},\n abstract = \t {Off-policy deep reinforcement learning (RL) has been successful in a range of challenging domains. 
However, standard off-policy RL algorithms can suffer from several issues, such as instability in Q-learning and balancing exploration and exploitation. To mitigate these issues, we present SUNRISE, a simple unified ensemble method, which is compatible with various off-policy RL algorithms. SUNRISE integrates two key ingredients: (a) ensemble-based weighted Bellman backups, which re-weight target Q-values based on uncertainty estimates from a Q-ensemble, and (b) an inference method that selects actions using the highest upper-confidence bounds for efficient exploration. By enforcing the diversity between agents using Bootstrap with random initialization, we show that these different ideas are largely orthogonal and can be fruitfully integrated, together further improving the performance of existing off-policy RL algorithms, such as Soft Actor-Critic and Rainbow DQN, for both continuous and discrete control tasks on both low-dimensional and high-dimensional environments.}\n}", "pdf": "http://proceedings.mlr.press/v139/lee21g/lee21g.pdf", "supp": "", "pdf_size": 1021629, "gs_citation": 284, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8840831494454574191&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "University of California, Berkeley; University of California, Berkeley; University of California, Berkeley; University of California, Berkeley", "aff_domain": "berkeley.edu; ; ; ", "email": "berkeley.edu; ; ; ", "github": "https://github.com/pokaxpoka/sunrise", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/lee21g.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Safe Reinforcement Learning Using Advantage-Based Intervention", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10129", "id": "10129", "proceeding": "http://proceedings.mlr.press/v139/wagener21a.html", "slides": "/media/icml-2021/Slides/10129.pdf", "author_site": "Nolan Wagener, Byron Boots, Ching-An Cheng", "author": "Nolan C Wagener; Byron Boots; Ching-An Cheng", "abstract": "Many sequential decision problems involve finding a policy that maximizes total reward while obeying safety constraints. Although much recent research has focused on the development of safe reinforcement learning (RL) algorithms that produce a safe policy after training, ensuring safety during training as well remains an open problem. A fundamental challenge is performing exploration while still satisfying constraints in an unknown Markov decision process (MDP). In this work, we address this problem for the chance-constrained setting.We propose a new algorithm, SAILR, that uses an intervention mechanism based on advantage functions to keep the agent safe throughout training and optimizes the agent\u2019s policy using off-the-shelf RL algorithms designed for unconstrained MDPs. Our method comes with strong guarantees on safety during \"both\" training and deployment (i.e., after training and without the intervention mechanism) and policy performance compared to the optimal safety-constrained policy. 
In our experiments, we show that SAILR violates constraints far less during training than standard safe RL and constrained MDP approaches and converges to a well-performing policy that can be deployed safely without intervention. Our code is available at https://github.com/nolanwagener/safe_rl.", "bibtex": "@InProceedings{pmlr-v139-wagener21a,\n title = \t {Safe Reinforcement Learning Using Advantage-Based Intervention},\n author = {Wagener, Nolan C and Boots, Byron and Cheng, Ching-An},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10630--10640},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wagener21a/wagener21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/wagener21a.html},\n abstract = \t {Many sequential decision problems involve finding a policy that maximizes total reward while obeying safety constraints. Although much recent research has focused on the development of safe reinforcement learning (RL) algorithms that produce a safe policy after training, ensuring safety during training as well remains an open problem. A fundamental challenge is performing exploration while still satisfying constraints in an unknown Markov decision process (MDP). In this work, we address this problem for the chance-constrained setting.We propose a new algorithm, SAILR, that uses an intervention mechanism based on advantage functions to keep the agent safe throughout training and optimizes the agent\u2019s policy using off-the-shelf RL algorithms designed for unconstrained MDPs. Our method comes with strong guarantees on safety during \"both\" training and deployment (i.e., after training and without the intervention mechanism) and policy performance compared to the optimal safety-constrained policy. In our experiments, we show that SAILR violates constraints far less during training than standard safe RL and constrained MDP approaches and converges to a well-performing policy that can be deployed safely without intervention. Our code is available at https://github.com/nolanwagener/safe_rl.}\n}", "pdf": "http://proceedings.mlr.press/v139/wagener21a/wagener21a.pdf", "supp": "", "pdf_size": 929252, "gs_citation": 69, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5048043466827651236&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Institute for Robotics and Intelligent Machines, Georgia Institute of Technology, Atlanta, Georgia, USA; Paul G. Allen School of Computer Science and Engineering, University of Washington, Seattle, Washington, USA; Microsoft Research, Redmond, Washington, USA", "aff_domain": "gatech.edu; ; ", "email": "gatech.edu; ; ", "github": "https://github.com/nolanwagener/safe_rl", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/wagener21a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Georgia Institute of Technology;University of Washington;Microsoft", "aff_unique_dep": "Institute for Robotics and Intelligent Machines;Paul G. 
Allen School of Computer Science and Engineering;Microsoft Research", "aff_unique_url": "https://www.gatech.edu;https://www.washington.edu;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "Georgia Tech;UW;MSR", "aff_campus_unique_index": "0;1;2", "aff_campus_unique": "Atlanta;Seattle;Redmond", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Safe Reinforcement Learning with Linear Function Approximation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9847", "id": "9847", "proceeding": "http://proceedings.mlr.press/v139/amani21a.html", "slides": "/media/icml-2021/Slides/9847.pdf", "author_site": "Sanae Amani Geshnigani, Christos Thrampoulidis, Lin Yang", "author": "Sanae Amani; Christos Thrampoulidis; Lin Yang", "abstract": "Safety in reinforcement learning has become increasingly important in recent years. Yet, existing solutions either fail to strictly avoid choosing unsafe actions, which may lead to catastrophic results in safety-critical systems, or fail to provide regret guarantees for settings where safety constraints need to be learned. In this paper, we address both problems by first modeling safety as an unknown linear cost function of states and actions, which must always fall below a certain threshold. We then present algorithms, termed SLUCB-QVI and RSLUCB-QVI, for episodic Markov decision processes (MDPs) with linear function approximation. We show that SLUCB-QVI and RSLUCB-QVI, while with \\emph{no safety violation}, achieve a $\\tilde{\\mathcal{O}}\\left(\\kappa\\sqrt{d^3H^3T}\\right)$ regret, nearly matching that of state-of-the-art unsafe algorithms, where $H$ is the duration of each episode, $d$ is the dimension of the feature mapping, $\\kappa$ is a constant characterizing the safety constraints, and $T$ is the total number of action plays. We further present numerical simulations that corroborate our theoretical findings.", "bibtex": "@InProceedings{pmlr-v139-amani21a,\n title = \t {Safe Reinforcement Learning with Linear Function Approximation},\n author = {Amani, Sanae and Thrampoulidis, Christos and Yang, Lin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {243--253},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/amani21a/amani21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/amani21a.html},\n abstract = \t {Safety in reinforcement learning has become increasingly important in recent years. Yet, existing solutions either fail to strictly avoid choosing unsafe actions, which may lead to catastrophic results in safety-critical systems, or fail to provide regret guarantees for settings where safety constraints need to be learned. In this paper, we address both problems by first modeling safety as an unknown linear cost function of states and actions, which must always fall below a certain threshold. We then present algorithms, termed SLUCB-QVI and RSLUCB-QVI, for episodic Markov decision processes (MDPs) with linear function approximation. 
We show that SLUCB-QVI and RSLUCB-QVI, while with \\emph{no safety violation}, achieve a $\\tilde{\\mathcal{O}}\\left(\\kappa\\sqrt{d^3H^3T}\\right)$ regret, nearly matching that of state-of-the-art unsafe algorithms, where $H$ is the duration of each episode, $d$ is the dimension of the feature mapping, $\\kappa$ is a constant characterizing the safety constraints, and $T$ is the total number of action plays. We further present numerical simulations that corroborate our theoretical findings.}\n}", "pdf": "http://proceedings.mlr.press/v139/amani21a/amani21a.pdf", "supp": "", "pdf_size": 679628, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16545892843539482771&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Electrical and Computer Engineering, University of California, Los Angeles; Department of Electrical and Computer Engineering, University of British Columbia, Vancouver; Department of Electrical and Computer Engineering, University of California, Los Angeles", "aff_domain": "ucla.edu;ece.ubc.ca;ee.ucla.edu", "email": "ucla.edu;ece.ubc.ca;ee.ucla.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/amani21a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of California, Los Angeles;University of British Columbia", "aff_unique_dep": "Department of Electrical and Computer Engineering;Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.ucla.edu;https://www.ubc.ca", "aff_unique_abbr": "UCLA;UBC", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Los Angeles;Vancouver", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United States;Canada" }, { "title": "SagaNet: A Small Sample Gated Network for Pediatric Cancer Diagnosis", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8479", "id": "8479", "proceeding": "http://proceedings.mlr.press/v139/liu21u.html", "slides": "", "author_site": "Yuhan Liu, Shiliang Sun", "author": "Yuhan Liu; Shiliang Sun", "abstract": "The scarcity of available samples and the high annotation cost of medical data cause a bottleneck in many digital diagnosis tasks based on deep learning. This problem is especially severe in pediatric tumor tasks, due to the small population base of children and high sample diversity caused by the high metastasis rate of related tumors. Targeted research on pediatric tumors is urgently needed but lacks sufficient attention. In this work, we propose a novel model to solve the diagnosis task of small round blue cell tumors (SRBCTs). To solve the problem of high noise and high diversity in the small sample scenario, the model is constrained to pay attention to the valid areas in the pathological image with a masking mechanism, and a length-aware loss is proposed to improve the tolerance to feature diversity. We evaluate this framework on a challenging small sample SRBCTs dataset, whose classification is difficult even for professional pathologists. 
The proposed model shows the best performance compared with state-of-the-art deep models and generalization on another pathological dataset, which illustrates the potentiality of deep learning applications in difficult small sample medical tasks.", "bibtex": "@InProceedings{pmlr-v139-liu21u,\n title = \t {SagaNet: A Small Sample Gated Network for Pediatric Cancer Diagnosis},\n author = {Liu, Yuhan and Sun, Shiliang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6947--6956},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21u/liu21u.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21u.html},\n abstract = \t {The scarcity of available samples and the high annotation cost of medical data cause a bottleneck in many digital diagnosis tasks based on deep learning. This problem is especially severe in pediatric tumor tasks, due to the small population base of children and high sample diversity caused by the high metastasis rate of related tumors. Targeted research on pediatric tumors is urgently needed but lacks sufficient attention. In this work, we propose a novel model to solve the diagnosis task of small round blue cell tumors (SRBCTs). To solve the problem of high noise and high diversity in the small sample scenario, the model is constrained to pay attention to the valid areas in the pathological image with a masking mechanism, and a length-aware loss is proposed to improve the tolerance to feature diversity. We evaluate this framework on a challenging small sample SRBCTs dataset, whose classification is difficult even for professional pathologists. 
The proposed model shows the best performance compared with state-of-the-art deep models and generalization on another pathological dataset, which illustrates the potentiality of deep learning applications in difficult small sample medical tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21u/liu21u.pdf", "supp": "", "pdf_size": 4095764, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6931214722544587893&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "School of Computer Science and Technology, East China Normal University, Shanghai, China; School of Computer Science and Technology, East China Normal University, Shanghai, China", "aff_domain": "cs.ecnu.edu.cn;cs.ecnu.edu.cn", "email": "cs.ecnu.edu.cn;cs.ecnu.edu.cn", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/liu21u.html", "aff_unique_index": "0;0", "aff_unique_norm": "East China Normal University", "aff_unique_dep": "School of Computer Science and Technology", "aff_unique_url": "http://www.ecnu.edu.cn", "aff_unique_abbr": "ECNU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Shanghai", "aff_country_unique_index": "0;0", "aff_country_unique": "China" }, { "title": "Sample Complexity of Robust Linear Classification on Separated Data", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9381", "id": "9381", "proceeding": "http://proceedings.mlr.press/v139/bhattacharjee21a.html", "slides": "", "author_site": "Robi Bhattacharjee, Somesh Jha, Kamalika Chaudhuri", "author": "Robi Bhattacharjee; Somesh Jha; Kamalika Chaudhuri", "abstract": "We consider the sample complexity of learning with adversarial robustness. Most prior theoretical results for this problem have considered a setting where different classes in the data are close together or overlapping. We consider, in contrast, the well-separated case where there exists a classifier with perfect accuracy and robustness, and show that the sample complexity narrates an entirely different story. Specifically, for linear classifiers, we show a large class of well-separated distributions where the expected robust loss of any algorithm is at least $\\Omega(\\frac{d}{n})$, whereas the max margin algorithm has expected standard loss $O(\\frac{1}{n})$. This shows a gap in the standard and robust losses that cannot be obtained via prior techniques. Additionally, we present an algorithm that, given an instance where the robustness radius is much smaller than the gap between the classes, gives a solution with expected robust loss $O(\\frac{1}{n})$. This shows that for very well-separated data, convergence rates of $O(\\frac{1}{n})$ are achievable, which is not the case otherwise. 
Our results apply to robustness measured in any $\\ell_p$ norm with $p > 1$ (including $p = \\infty$).", "bibtex": "@InProceedings{pmlr-v139-bhattacharjee21a,\n title = \t {Sample Complexity of Robust Linear Classification on Separated Data},\n author = {Bhattacharjee, Robi and Jha, Somesh and Chaudhuri, Kamalika},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {884--893},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bhattacharjee21a/bhattacharjee21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bhattacharjee21a.html},\n abstract = \t {We consider the sample complexity of learning with adversarial robustness. Most prior theoretical results for this problem have considered a setting where different classes in the data are close together or overlapping. We consider, in contrast, the well-separated case where there exists a classifier with perfect accuracy and robustness, and show that the sample complexity narrates an entirely different story. Specifically, for linear classifiers, we show a large class of well-separated distributions where the expected robust loss of any algorithm is at least $\\Omega(\\frac{d}{n})$, whereas the max margin algorithm has expected standard loss $O(\\frac{1}{n})$. This shows a gap in the standard and robust losses that cannot be obtained via prior techniques. Additionally, we present an algorithm that, given an instance where the robustness radius is much smaller than the gap between the classes, gives a solution with expected robust loss $O(\\frac{1}{n})$. This shows that for very well-separated data, convergence rates of $O(\\frac{1}{n})$ are achievable, which is not the case otherwise. 
Our results apply to robustness measured in any $\\ell_p$ norm with $p > 1$ (including $p = \\infty$).}\n}", "pdf": "http://proceedings.mlr.press/v139/bhattacharjee21a/bhattacharjee21a.pdf", "supp": "", "pdf_size": 399484, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=406906309263221695&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 2, "aff": "University of California, San Diego; University of Wisconsin-Madison; University of California, San Diego", "aff_domain": "eng.ucsd.edu; ; ", "email": "eng.ucsd.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/bhattacharjee21a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of California, San Diego;University of Wisconsin-Madison", "aff_unique_dep": ";", "aff_unique_url": "https://www.ucsd.edu;https://www.wisc.edu", "aff_unique_abbr": "UCSD;UW-Madison", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "San Diego;Madison", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Sample Efficient Reinforcement Learning In Continuous State Spaces: A Perspective Beyond Linearity", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9163", "id": "9163", "proceeding": "http://proceedings.mlr.press/v139/malik21c.html", "slides": "", "author_site": "Dhruv Malik, Aldo Pacchiano, Vishwak Srinivasan, Yuanzhi Li", "author": "Dhruv Malik; Aldo Pacchiano; Vishwak Srinivasan; Yuanzhi Li", "abstract": "Reinforcement learning (RL) is empirically successful in complex nonlinear Markov decision processes (MDPs) with continuous state spaces. By contrast, the majority of theoretical RL literature requires the MDP to satisfy some form of linear structure, in order to guarantee sample efficient RL. Such efforts typically assume the transition dynamics or value function of the MDP are described by linear functions of the state features. To resolve this discrepancy between theory and practice, we introduce the Effective Planning Window (EPW) condition, a structural condition on MDPs that makes no linearity assumptions. We demonstrate that the EPW condition permits sample efficient RL, by providing an algorithm which provably solves MDPs satisfying this condition. Our algorithm requires minimal assumptions on the policy class, which can include multi-layer neural networks with nonlinear activation functions. Notably, the EPW condition is directly motivated by popular gaming benchmarks, and we show that many classic Atari games satisfy this condition. 
We additionally show the necessity of conditions like EPW, by demonstrating that simple MDPs with slight nonlinearities cannot be solved sample efficiently.", "bibtex": "@InProceedings{pmlr-v139-malik21c,\n title = \t {Sample Efficient Reinforcement Learning In Continuous State Spaces: A Perspective Beyond Linearity},\n author = {Malik, Dhruv and Pacchiano, Aldo and Srinivasan, Vishwak and Li, Yuanzhi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7412--7422},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/malik21c/malik21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/malik21c.html},\n abstract = \t {Reinforcement learning (RL) is empirically successful in complex nonlinear Markov decision processes (MDPs) with continuous state spaces. By contrast, the majority of theoretical RL literature requires the MDP to satisfy some form of linear structure, in order to guarantee sample efficient RL. Such efforts typically assume the transition dynamics or value function of the MDP are described by linear functions of the state features. To resolve this discrepancy between theory and practice, we introduce the Effective Planning Window (EPW) condition, a structural condition on MDPs that makes no linearity assumptions. We demonstrate that the EPW condition permits sample efficient RL, by providing an algorithm which provably solves MDPs satisfying this condition. Our algorithm requires minimal assumptions on the policy class, which can include multi-layer neural networks with nonlinear activation functions. Notably, the EPW condition is directly motivated by popular gaming benchmarks, and we show that many classic Atari games satisfy this condition. 
We additionally show the necessity of conditions like EPW, by demonstrating that simple MDPs with slight nonlinearities cannot be solved sample efficiently.}\n}", "pdf": "http://proceedings.mlr.press/v139/malik21c/malik21c.pdf", "supp": "", "pdf_size": 391549, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2268067701444329451&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Machine Learning Department, Carnegie Mellon University; Department of EECS, UC Berkeley; Machine Learning Department, Carnegie Mellon University; Machine Learning Department, Carnegie Mellon University", "aff_domain": "andrew.cmu.edu; ; ; ", "email": "andrew.cmu.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/malik21c.html", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "Carnegie Mellon University;University of California, Berkeley", "aff_unique_dep": "Machine Learning Department;Department of Electrical Engineering and Computer Sciences", "aff_unique_url": "https://www.cmu.edu;https://www.berkeley.edu", "aff_unique_abbr": "CMU;UC Berkeley", "aff_campus_unique_index": "1", "aff_campus_unique": ";Berkeley", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Sample-Optimal PAC Learning of Halfspaces with Malicious Noise", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9095", "id": "9095", "proceeding": "http://proceedings.mlr.press/v139/shen21b.html", "slides": "", "author": "Jie Shen", "abstract": "We study efficient PAC learning of homogeneous halfspaces in $\\mathbb{R}^d$ in the presence of malicious noise of Valiant (1985). This is a challenging noise model and only until recently has near-optimal noise tolerance bound been established under the mild condition that the unlabeled data distribution is isotropic log-concave. However, it remains unsettled how to obtain the optimal sample complexity simultaneously. In this work, we present a new analysis for the algorithm of Awasthi et al. (2017) and show that it essentially achieves the near-optimal sample complexity bound of $\\tilde{O}(d)$, improving the best known result of $\\tilde{O}(d^2)$. Our main ingredient is a novel incorporation of a matrix Chernoff-type inequality to bound the spectrum of an empirical covariance matrix for well-behaved distributions, in conjunction with a careful exploration of the localization schemes of Awasthi et al. (2017). We further extend the algorithm and analysis to the more general and stronger nasty noise model of Bshouty et al. (2002), showing that it is still possible to achieve near-optimal noise tolerance and sample complexity in polynomial time.", "bibtex": "@InProceedings{pmlr-v139-shen21b,\n title = \t {Sample-Optimal PAC Learning of Halfspaces with Malicious Noise},\n author = {Shen, Jie},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9515--9524},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/shen21b/shen21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/shen21b.html},\n abstract = \t {We study efficient PAC learning of homogeneous halfspaces in $\\mathbb{R}^d$ in the presence of malicious noise of Valiant (1985). 
This is a challenging noise model and only until recently has near-optimal noise tolerance bound been established under the mild condition that the unlabeled data distribution is isotropic log-concave. However, it remains unsettled how to obtain the optimal sample complexity simultaneously. In this work, we present a new analysis for the algorithm of Awasthi et al. (2017) and show that it essentially achieves the near-optimal sample complexity bound of $\\tilde{O}(d)$, improving the best known result of $\\tilde{O}(d^2)$. Our main ingredient is a novel incorporation of a matrix Chernoff-type inequality to bound the spectrum of an empirical covariance matrix for well-behaved distributions, in conjunction with a careful exploration of the localization schemes of Awasthi et al. (2017). We further extend the algorithm and analysis to the more general and stronger nasty noise model of Bshouty et al. (2002), showing that it is still possible to achieve near-optimal noise tolerance and sample complexity in polynomial time.}\n}", "pdf": "http://proceedings.mlr.press/v139/shen21b/shen21b.pdf", "supp": "", "pdf_size": 293802, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9959741180670263619&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Stevens Institute of Technology, Hoboken, New Jersey, USA", "aff_domain": "stevens.edu", "email": "stevens.edu", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v139/shen21b.html", "aff_unique_index": "0", "aff_unique_norm": "Stevens Institute of Technology", "aff_unique_dep": "", "aff_unique_url": "https://www.stevens.edu", "aff_unique_abbr": "SIT", "aff_campus_unique_index": "0", "aff_campus_unique": "Hoboken", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "title": "Sawtooth Factorial Topic Embeddings Guided Gamma Belief Network", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8403", "id": "8403", "proceeding": "http://proceedings.mlr.press/v139/duan21b.html", "slides": "/media/icml-2021/Slides/8403.pdf", "author_site": "Zhibin Duan, Dongsheng Wang, Bo Chen, CHAOJIE WANG, Wenchao Chen, yewen li, Jie Ren, Mingyuan Zhou", "author": "Zhibin Duan; Dongsheng Wang; Bo Chen; Chaojie Wang; Wenchao Chen; Yewen Li; Jie Ren; Mingyuan Zhou", "abstract": "Hierarchical topic models such as the gamma belief network (GBN) have delivered promising results in mining multi-layer document representations and discovering interpretable topic taxonomies. However, they often assume in the prior that the topics at each layer are independently drawn from the Dirichlet distribution, ignoring the dependencies between the topics both at the same layer and across different layers. To relax this assumption, we propose sawtooth factorial topic embedding guided GBN, a deep generative model of documents that captures the dependencies and semantic similarities between the topics in the embedding space. Specifically, both the words and topics are represented as embedding vectors of the same dimension. The topic matrix at a layer is factorized into the product of a factor loading matrix and a topic embedding matrix, the transpose of which is set as the factor loading matrix of the layer above. Repeating this particular type of factorization, which shares components between adjacent layers, leads to a structure referred to as sawtooth factorization. 
An auto-encoding variational inference network is constructed to optimize the model parameter via stochastic gradient descent. Experiments on big corpora show that our models outperform other neural topic models on extracting deeper interpretable topics and deriving better document representations.", "bibtex": "@InProceedings{pmlr-v139-duan21b,\n title = \t {Sawtooth Factorial Topic Embeddings Guided Gamma Belief Network},\n author = {Duan, Zhibin and Wang, Dongsheng and Chen, Bo and Wang, Chaojie and Chen, Wenchao and Li, Yewen and Ren, Jie and Zhou, Mingyuan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2903--2913},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/duan21b/duan21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/duan21b.html},\n abstract = \t {Hierarchical topic models such as the gamma belief network (GBN) have delivered promising results in mining multi-layer document representations and discovering interpretable topic taxonomies. However, they often assume in the prior that the topics at each layer are independently drawn from the Dirichlet distribution, ignoring the dependencies between the topics both at the same layer and across different layers. To relax this assumption, we propose sawtooth factorial topic embedding guided GBN, a deep generative model of documents that captures the dependencies and semantic similarities between the topics in the embedding space. Specifically, both the words and topics are represented as embedding vectors of the same dimension. The topic matrix at a layer is factorized into the product of a factor loading matrix and a topic embedding matrix, the transpose of which is set as the factor loading matrix of the layer above. Repeating this particular type of factorization, which shares components between adjacent layers, leads to a structure referred to as sawtooth factorization. An auto-encoding variational inference network is constructed to optimize the model parameter via stochastic gradient descent. 
Experiments on big corpora show that our models outperform other neural topic models on extracting deeper interpretable topics and deriving better document representations.}\n}", "pdf": "http://proceedings.mlr.press/v139/duan21b/duan21b.pdf", "supp": "", "pdf_size": 568312, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14933868730567582356&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "National Laboratory of Radar Signal Processing, Xidian University, Xi\u2019an, China; National Laboratory of Radar Signal Processing, Xidian University, Xi\u2019an, China; National Laboratory of Radar Signal Processing, Xidian University, Xi\u2019an, China; National Laboratory of Radar Signal Processing, Xidian University, Xi\u2019an, China; National Laboratory of Radar Signal Processing, Xidian University, Xi\u2019an, China; National Laboratory of Radar Signal Processing, Xidian University, Xi\u2019an, China; National Laboratory of Radar Signal Processing, Xidian University, Xi\u2019an, China; McCombs School of Business The University of Texas at Austin, Austin, TX 78712, USA", "aff_domain": "mail.xidian.edu.com; ; ; ; ; ; ; ", "email": "mail.xidian.edu.com; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/duan21b.html", "aff_unique_index": "0;0;0;0;0;0;0;1", "aff_unique_norm": "Xidian University;University of Texas at Austin", "aff_unique_dep": "National Laboratory of Radar Signal Processing;McCombs School of Business", "aff_unique_url": "http://www.xidian.edu.cn/;https://www.mccombs.utexas.edu", "aff_unique_abbr": "Xidian;UT Austin", "aff_campus_unique_index": "0;0;0;0;0;0;0;1", "aff_campus_unique": "Xi'an;Austin", "aff_country_unique_index": "0;0;0;0;0;0;0;1", "aff_country_unique": "China;United States" }, { "title": "Scalable Certified Segmentation via Randomized Smoothing", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8809", "id": "8809", "proceeding": "http://proceedings.mlr.press/v139/fischer21a.html", "slides": "/media/icml-2021/Slides/8809.pdf", "author_site": "Marc Fischer, Maximilian Baader, Martin Vechev", "author": "Marc Fischer; Maximilian Baader; Martin Vechev", "abstract": "We present a new certification method for image and point cloud segmentation based on randomized smoothing. The method leverages a novel scalable algorithm for prediction and certification that correctly accounts for multiple testing, necessary for ensuring statistical guarantees. The key to our approach is reliance on established multiple-testing correction mechanisms as well as the ability to abstain from classifying single pixels or points while still robustly segmenting the overall input. Our experimental evaluation on synthetic data and challenging datasets, such as Pascal Context, Cityscapes, and ShapeNet, shows that our algorithm can achieve, for the first time, competitive accuracy and certification guarantees on real-world segmentation tasks. 
We provide an implementation at https://github.com/eth-sri/segmentation-smoothing.", "bibtex": "@InProceedings{pmlr-v139-fischer21a,\n title = \t {Scalable Certified Segmentation via Randomized Smoothing},\n author = {Fischer, Marc and Baader, Maximilian and Vechev, Martin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3340--3351},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fischer21a/fischer21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/fischer21a.html},\n abstract = \t {We present a new certification method for image and point cloud segmentation based on randomized smoothing. The method leverages a novel scalable algorithm for prediction and certification that correctly accounts for multiple testing, necessary for ensuring statistical guarantees. The key to our approach is reliance on established multiple-testing correction mechanisms as well as the ability to abstain from classifying single pixels or points while still robustly segmenting the overall input. Our experimental evaluation on synthetic data and challenging datasets, such as Pascal Context, Cityscapes, and ShapeNet, shows that our algorithm can achieve, for the first time, competitive accuracy and certification guarantees on real-world segmentation tasks. We provide an implementation at https://github.com/eth-sri/segmentation-smoothing.}\n}", "pdf": "http://proceedings.mlr.press/v139/fischer21a/fischer21a.pdf", "supp": "", "pdf_size": 2670944, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9847674407340584512&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, ETH Zurich, Switzerland; Department of Computer Science, ETH Zurich, Switzerland; Department of Computer Science, ETH Zurich, Switzerland", "aff_domain": "inf.ethz.ch; ; ", "email": "inf.ethz.ch; ; ", "github": "https://github.com/eth-sri/segmentation-smoothing", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/fischer21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETHZ", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Switzerland" }, { "title": "Scalable Computations of Wasserstein Barycenter via Input Convex Neural Networks", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8951", "id": "8951", "proceeding": "http://proceedings.mlr.press/v139/chen21e.html", "slides": "/media/icml-2021/Slides/8951_qv4N7UL.pdf", "author_site": "Jiaojiao Fan, Amirhossein Taghvaei, Yongxin Chen", "author": "Jiaojiao Fan; Amirhossein Taghvaei; Yongxin Chen", "abstract": "Wasserstein Barycenter is a principled approach to represent the weighted mean of a given set of probability distributions, utilizing the geometry induced by optimal transport. In this work, we present a novel scalable algorithm to approximate the Wasserstein Barycenters aiming at high-dimensional applications in machine learning. 
Our proposed algorithm is based on the Kantorovich dual formulation of the Wasserstein-2 distance as well as a recent neural network architecture, input convex neural network, that is known to parametrize convex functions. The distinguishing features of our method are: i) it only requires samples from the marginal distributions; ii) unlike the existing approaches, it represents the Barycenter with a generative model and can thus generate infinite samples from the barycenter without querying the marginal distributions; iii) it works similar to Generative Adversarial Model in one marginal case. We demonstrate the efficacy of our algorithm by comparing it with the state-of-the-art methods in multiple experiments.", "bibtex": "@InProceedings{pmlr-v139-fan21d,\n title = \t {Scalable Computations of Wasserstein Barycenter via Input Convex Neural Networks},\n author = {Fan, Jiaojiao and Taghvaei, Amirhossein and Chen, Yongxin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1571--1581},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fan21d/fan21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/fan21d.html},\n abstract = \t {Wasserstein Barycenter is a principled approach to represent the weighted mean of a given set of probability distributions, utilizing the geometry induced by optimal transport. In this work, we present a novel scalable algorithm to approximate the Wasserstein Barycenters aiming at high-dimensional applications in machine learning. Our proposed algorithm is based on the Kantorovich dual formulation of the Wasserstein-2 distance as well as a recent neural network architecture, input convex neural network, that is known to parametrize convex functions. The distinguishing features of our method are: i) it only requires samples from the marginal distributions; ii) unlike the existing approaches, it represents the Barycenter with a generative model and can thus generate infinite samples from the barycenter without querying the marginal distributions; iii) it works similar to Generative Adversarial Model in one marginal case. 
We demonstrate the efficacy of our algorithm by comparing it with the state-of-the-art methods in multiple experiments.}\n}", "pdf": "http://proceedings.mlr.press/v139/fan21d/fan21d.pdf", "supp": "", "pdf_size": 8039230, "gs_citation": 60, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7480420834678810462&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Georgia Institute of Technology; University of California, Irvine; Georgia Institute of Technology", "aff_domain": "gatech.edu; ;gatech.edu", "email": "gatech.edu; ;gatech.edu", "github": "https://github.com/sbyebss/Scalable-Wasserstein-Barycenter", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/fan21d.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Georgia Institute of Technology;University of California, Irvine", "aff_unique_dep": ";", "aff_unique_url": "https://www.gatech.edu;https://www.uci.edu", "aff_unique_abbr": "Georgia Tech;UCI", "aff_campus_unique_index": "1", "aff_campus_unique": ";Irvine", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Scalable Evaluation of Multi-Agent Reinforcement Learning with Melting Pot", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9947", "id": "9947", "proceeding": "http://proceedings.mlr.press/v139/leibo21a.html", "slides": "", "author_site": "Joel Z Leibo, Edgar Duenez-Guzman, Alexander Vezhnevets, John Agapiou, Peter Sunehag, Raphael Koster, Jayd Matyas, Charles Beattie, Igor Mordatch, Thore Graepel", "author": "Joel Z Leibo; Edgar A Due\u00f1ez-Guzman; Alexander Vezhnevets; John P Agapiou; Peter Sunehag; Raphael Koster; Jayd Matyas; Charlie Beattie; Igor Mordatch; Thore Graepel", "abstract": "Existing evaluation suites for multi-agent reinforcement learning (MARL) do not assess generalization to novel situations as their primary objective (unlike supervised learning benchmarks). Our contribution, Melting Pot, is a MARL evaluation suite that fills this gap and uses reinforcement learning to reduce the human labor required to create novel test scenarios. This works because one agent\u2019s behavior constitutes (part of) another agent\u2019s environment. To demonstrate scalability, we have created over 80 unique test scenarios covering a broad range of research topics such as social dilemmas, reciprocity, resource sharing, and task partitioning. 
We apply these test scenarios to standard MARL training algorithms, and demonstrate how Melting Pot reveals weaknesses not apparent from training performance alone.", "bibtex": "@InProceedings{pmlr-v139-leibo21a,\n title = \t {Scalable Evaluation of Multi-Agent Reinforcement Learning with Melting Pot},\n author = {Leibo, Joel Z and Due{\\~n}ez-Guzman, Edgar A and Vezhnevets, Alexander and Agapiou, John P and Sunehag, Peter and Koster, Raphael and Matyas, Jayd and Beattie, Charlie and Mordatch, Igor and Graepel, Thore},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6187--6199},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/leibo21a/leibo21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/leibo21a.html},\n abstract = \t {Existing evaluation suites for multi-agent reinforcement learning (MARL) do not assess generalization to novel situations as their primary objective (unlike supervised learning benchmarks). Our contribution, Melting Pot, is a MARL evaluation suite that fills this gap and uses reinforcement learning to reduce the human labor required to create novel test scenarios. This works because one agent\u2019s behavior constitutes (part of) another agent\u2019s environment. To demonstrate scalability, we have created over 80 unique test scenarios covering a broad range of research topics such as social dilemmas, reciprocity, resource sharing, and task partitioning. We apply these test scenarios to standard MARL training algorithms, and demonstrate how Melting Pot reveals weaknesses not apparent from training performance alone.}\n}", "pdf": "http://proceedings.mlr.press/v139/leibo21a/leibo21a.pdf", "supp": "", "pdf_size": 2228428, "gs_citation": 117, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1228594000566969194&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; Google Brain; DeepMind", "aff_domain": "google.com; ; ; ; ; ; ; ; ; ", "email": "google.com; ; ; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 10, "oa": "https://proceedings.mlr.press/v139/leibo21a.html", "aff_unique_index": "0;0;0;0;0;0;0;0;1;0", "aff_unique_norm": "DeepMind;Google", "aff_unique_dep": ";Google Brain", "aff_unique_url": "https://deepmind.com;https://brain.google.com", "aff_unique_abbr": "DeepMind;Google Brain", "aff_campus_unique_index": "1", "aff_campus_unique": ";Mountain View", "aff_country_unique_index": "0;0;0;0;0;0;0;0;1;0", "aff_country_unique": "United Kingdom;United States" }, { "title": "Scalable Marginal Likelihood Estimation for Model Selection in Deep Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9469", "id": "9469", "proceeding": "http://proceedings.mlr.press/v139/immer21a.html", "slides": "/media/icml-2021/Slides/9469.pdf", "author_site": "Alexander Immer, Matthias Bauer, Vincent Fortuin, Gunnar Ratsch, Khan Emtiyaz", "author": "Alexander Immer; Matthias Bauer; Vincent Fortuin; Gunnar R\u00e4tsch; Khan Mohammad Emtiyaz", "abstract": "Marginal-likelihood based model-selection, even though promising, is rarely used in deep learning due to estimation difficulties. Instead, most approaches rely on validation data, which may not be readily available. 
In this work, we present a scalable marginal-likelihood estimation method to select both hyperparameters and network architectures, based on the training data alone. Some hyperparameters can be estimated online during training, simplifying the procedure. Our marginal-likelihood estimate is based on Laplace\u2019s method and Gauss-Newton approximations to the Hessian, and it outperforms cross-validation and manual tuning on standard regression and image classification datasets, especially in terms of calibration and out-of-distribution detection. Our work shows that marginal likelihoods can improve generalization and be useful when validation data is unavailable (e.g., in nonstationary settings).", "bibtex": "@InProceedings{pmlr-v139-immer21a,\n title = \t {Scalable Marginal Likelihood Estimation for Model Selection in Deep Learning},\n author = {Immer, Alexander and Bauer, Matthias and Fortuin, Vincent and R{\\\"a}tsch, Gunnar and Emtiyaz, Khan Mohammad},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4563--4573},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/immer21a/immer21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/immer21a.html},\n abstract = \t {Marginal-likelihood based model-selection, even though promising, is rarely used in deep learning due to estimation difficulties. Instead, most approaches rely on validation data, which may not be readily available. In this work, we present a scalable marginal-likelihood estimation method to select both hyperparameters and network architectures, based on the training data alone. Some hyperparameters can be estimated online during training, simplifying the procedure. Our marginal-likelihood estimate is based on Laplace\u2019s method and Gauss-Newton approximations to the Hessian, and it outperforms cross-validation and manual tuning on standard regression and image classification datasets, especially in terms of calibration and out-of-distribution detection. 
Our work shows that marginal likelihoods can improve generalization and be useful when validation data is unavailable (e.g., in nonstationary settings).}\n}", "pdf": "http://proceedings.mlr.press/v139/immer21a/immer21a.pdf", "supp": "", "pdf_size": 2207494, "gs_citation": 136, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11062863403728072122&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science, ETH Zurich, Switzerland + Max Planck ETH Center for Learning Systems (CLS); Max Planck Institute for Intelligent Systems, Germany + University of Cambridge, UK; Department of Computer Science, ETH Zurich, Switzerland; Max Planck ETH Center for Learning Systems (CLS); RIKEN Center for Advanced Intelligence Project, Japan", "aff_domain": "inf.ethz.ch; ; ; ; ", "email": "inf.ethz.ch; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/immer21a.html", "aff_unique_index": "0+1;2+3;0;1;4", "aff_unique_norm": "ETH Zurich;Max Planck ETH Center for Learning Systems;Max Planck Institute for Intelligent Systems;University of Cambridge;RIKEN Center for Advanced Intelligence Project", "aff_unique_dep": "Department of Computer Science;Center for Learning Systems;;;Center for Advanced Intelligence Project", "aff_unique_url": "https://www.ethz.ch;https://cls.ethz.ch;https://www.mpi-is.mpg.de;https://www.cam.ac.uk;https://www.riken.jp/en/c-aip/", "aff_unique_abbr": "ETHZ;CLS;MPI-IS;Cambridge;RIKEN C-AIP", "aff_campus_unique_index": ";1", "aff_campus_unique": ";Cambridge", "aff_country_unique_index": "0+0;1+2;0;0;3", "aff_country_unique": "Switzerland;Germany;United Kingdom;Japan" }, { "title": "Scalable Normalizing Flows for Permutation Invariant Densities", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10325", "id": "10325", "proceeding": "http://proceedings.mlr.press/v139/bilos21a.html", "slides": "", "author_site": "Marin Bilo\u0161, Stephan G\u00fcnnemann", "author": "Marin Bilo\u0161; Stephan G\u00fcnnemann", "abstract": "Modeling sets is an important problem in machine learning since this type of data can be found in many domains. A promising approach defines a family of permutation invariant densities with continuous normalizing flows. This allows us to maximize the likelihood directly and sample new realizations with ease. In this work, we demonstrate how calculating the trace, a crucial step in this method, raises issues that occur both during training and inference, limiting its practicality. We propose an alternative way of defining permutation equivariant transformations that give closed form trace. This leads not only to improvements while training, but also to better final performance. 
We demonstrate the benefits of our approach on point processes and general set modeling.", "bibtex": "@InProceedings{pmlr-v139-bilos21a,\n title = \t {Scalable Normalizing Flows for Permutation Invariant Densities},\n author = {Bilo{\\v{s}}, Marin and G{\\\"u}nnemann, Stephan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {957--967},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bilos21a/bilos21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bilos21a.html},\n abstract = \t {Modeling sets is an important problem in machine learning since this type of data can be found in many domains. A promising approach defines a family of permutation invariant densities with continuous normalizing flows. This allows us to maximize the likelihood directly and sample new realizations with ease. In this work, we demonstrate how calculating the trace, a crucial step in this method, raises issues that occur both during training and inference, limiting its practicality. We propose an alternative way of defining permutation equivariant transformations that give closed form trace. This leads not only to improvements while training, but also to better final performance. We demonstrate the benefits of our approach on point processes and general set modeling.}\n}", "pdf": "http://proceedings.mlr.press/v139/bilos21a/bilos21a.pdf", "supp": "", "pdf_size": 1963786, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10697108842261651954&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Technical University of Munich; Technical University of Munich", "aff_domain": "in.tum.de; ", "email": "in.tum.de; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/bilos21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Technical University of Munich", "aff_unique_dep": "", "aff_unique_url": "https://www.tum.de", "aff_unique_abbr": "TUM", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "title": "Scalable Optimal Transport in High Dimensions for Graph Distances, Embedding Alignment, and More", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8777", "id": "8777", "proceeding": "http://proceedings.mlr.press/v139/klicpera21a.html", "slides": "/media/icml-2021/Slides/8777.pdf", "author_site": "Johannes Gasteiger, Marten Lienen, Stephan G\u00fcnnemann", "author": "Johannes Gasteiger; Marten Lienen; Stephan G\u00fcnnemann", "abstract": "The current best practice for computing optimal transport (OT) is via entropy regularization and Sinkhorn iterations. This algorithm runs in quadratic time as it requires the full pairwise cost matrix, which is prohibitively expensive for large sets of objects. In this work we propose two effective log-linear time approximations of the cost matrix: First, a sparse approximation based on locality sensitive hashing (LSH) and, second, a Nystr{\u00f6}m approximation with LSH-based sparse corrections, which we call locally corrected Nystr{\u00f6}m (LCN). These approximations enable general log-linear time algorithms for entropy-regularized OT that perform well even for the complex, high-dimensional spaces common in deep learning. 
We analyse these approximations theoretically and evaluate them experimentally both directly and end-to-end as a component for real-world applications. Using our approximations for unsupervised word embedding alignment enables us to speed up a state-of-the-art method by a factor of 3 while also improving the accuracy by 3.1 percentage points without any additional model changes. For graph distance regression we propose the graph transport network (GTN), which combines graph neural networks (GNNs) with enhanced Sinkhorn. GTN outcompetes previous models by 48% and still scales log-linearly in the number of nodes.", "bibtex": "@InProceedings{pmlr-v139-gasteiger21a,\n title = \t {Scalable Optimal Transport in High Dimensions for Graph Distances, Embedding Alignment, and More},\n author = {Gasteiger, Johannes and Lienen, Marten and G{\\\"u}nnemann, Stephan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5616--5627},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gasteiger21a/gasteiger21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/gasteiger21a.html},\n abstract = \t {The current best practice for computing optimal transport (OT) is via entropy regularization and Sinkhorn iterations. This algorithm runs in quadratic time as it requires the full pairwise cost matrix, which is prohibitively expensive for large sets of objects. In this work we propose two effective log-linear time approximations of the cost matrix: First, a sparse approximation based on locality sensitive hashing (LSH) and, second, a Nystr{\u00f6}m approximation with LSH-based sparse corrections, which we call locally corrected Nystr{\u00f6}m (LCN). These approximations enable general log-linear time algorithms for entropy-regularized OT that perform well even for the complex, high-dimensional spaces common in deep learning. We analyse these approximations theoretically and evaluate them experimentally both directly and end-to-end as a component for real-world applications. Using our approximations for unsupervised word embedding alignment enables us to speed up a state-of-the-art method by a factor of 3 while also improving the accuracy by 3.1 percentage points without any additional model changes. For graph distance regression we propose the graph transport network (GTN), which combines graph neural networks (GNNs) with enhanced Sinkhorn. 
GTN outcompetes previous models by 48% and still scales log-linearly in the number of nodes.}\n}", "pdf": "http://proceedings.mlr.press/v139/gasteiger21a/gasteiger21a.pdf", "supp": "", "pdf_size": 642874, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8087097144288825268&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Technical University of Munich; Technical University of Munich; Technical University of Munich", "aff_domain": "in.tum.de; ; ", "email": "in.tum.de; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/gasteiger21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Technical University of Munich", "aff_unique_dep": "", "aff_unique_url": "https://www.tum.de", "aff_unique_abbr": "TUM", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Germany" }, { "title": "Scalable Variational Gaussian Processes via Harmonic Kernel Decomposition", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9537", "id": "9537", "proceeding": "http://proceedings.mlr.press/v139/sun21d.html", "slides": "", "author_site": "Shengyang Sun, Jiaxin Shi, Andrew Wilson, Roger Grosse", "author": "Shengyang Sun; Jiaxin Shi; Andrew Gordon Gordon Wilson; Roger B Grosse", "abstract": "We introduce a new scalable variational Gaussian process approximation which provides a high fidelity approximation while retaining general applicability. We propose the harmonic kernel decomposition (HKD), which uses Fourier series to decompose a kernel as a sum of orthogonal kernels. Our variational approximation exploits this orthogonality to enable a large number of inducing points at a low computational cost. We demonstrate that, on a range of regression and classification problems, our approach can exploit input space symmetries such as translations and reflections, and it significantly outperforms standard variational methods in scalability and accuracy. Notably, our approach achieves state-of-the-art results on CIFAR-10 among pure GP models.", "bibtex": "@InProceedings{pmlr-v139-sun21d,\n title = \t {Scalable Variational Gaussian Processes via Harmonic Kernel Decomposition},\n author = {Sun, Shengyang and Shi, Jiaxin and Wilson, Andrew Gordon Gordon and Grosse, Roger B},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9955--9965},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sun21d/sun21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/sun21d.html},\n abstract = \t {We introduce a new scalable variational Gaussian process approximation which provides a high fidelity approximation while retaining general applicability. We propose the harmonic kernel decomposition (HKD), which uses Fourier series to decompose a kernel as a sum of orthogonal kernels. Our variational approximation exploits this orthogonality to enable a large number of inducing points at a low computational cost. We demonstrate that, on a range of regression and classification problems, our approach can exploit input space symmetries such as translations and reflections, and it significantly outperforms standard variational methods in scalability and accuracy. 
Notably, our approach achieves state-of-the-art results on CIFAR-10 among pure GP models.}\n}", "pdf": "http://proceedings.mlr.press/v139/sun21d/sun21d.pdf", "supp": "", "pdf_size": 7178224, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5527723102830248655&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "University of Toronto + Vector Institute; Microsoft Research New England; New York University; University of Toronto + Vector Institute", "aff_domain": "cs.toronto.edu; ; ; ", "email": "cs.toronto.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/sun21d.html", "aff_unique_index": "0+1;2;3;0+1", "aff_unique_norm": "University of Toronto;Vector Institute;Microsoft;New York University", "aff_unique_dep": ";;Microsoft Research;", "aff_unique_url": "https://www.utoronto.ca;https://vectorinstitute.ai/;https://www.microsoft.com/en-us/research/group/microsoft-research-new-england;https://www.nyu.edu", "aff_unique_abbr": "U of T;Vector Institute;MSR NE;NYU", "aff_campus_unique_index": ";1;", "aff_campus_unique": ";New England", "aff_country_unique_index": "0+0;1;1;0+0", "aff_country_unique": "Canada;United States" }, { "title": "Scaling Multi-Agent Reinforcement Learning with Selective Parameter Sharing", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8583", "id": "8583", "proceeding": "http://proceedings.mlr.press/v139/christianos21a.html", "slides": "", "author_site": "Filippos Christianos, Georgios Papoudakis, Muhammad Arrasy Rahman, Stefano V. Albrecht", "author": "Filippos Christianos; Georgios Papoudakis; Muhammad A Rahman; Stefano V Albrecht", "abstract": "Sharing parameters in multi-agent deep reinforcement learning has played an essential role in allowing algorithms to scale to a large number of agents. Parameter sharing between agents significantly decreases the number of trainable parameters, shortening training times to tractable levels, and has been linked to more efficient learning. However, having all agents share the same parameters can also have a detrimental effect on learning. We demonstrate the impact of parameter sharing methods on training speed and converged returns, establishing that when applied indiscriminately, their effectiveness is highly dependent on the environment. We propose a novel method to automatically identify agents which may benefit from sharing parameters by partitioning them based on their abilities and goals. 
Our approach combines the increased sample efficiency of parameter sharing with the representational capacity of multiple independent networks to reduce training time and increase final returns.", "bibtex": "@InProceedings{pmlr-v139-christianos21a,\n title = \t {Scaling Multi-Agent Reinforcement Learning with Selective Parameter Sharing},\n author = {Christianos, Filippos and Papoudakis, Georgios and Rahman, Muhammad A and Albrecht, Stefano V},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1989--1998},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/christianos21a/christianos21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/christianos21a.html},\n abstract = \t {Sharing parameters in multi-agent deep reinforcement learning has played an essential role in allowing algorithms to scale to a large number of agents. Parameter sharing between agents significantly decreases the number of trainable parameters, shortening training times to tractable levels, and has been linked to more efficient learning. However, having all agents share the same parameters can also have a detrimental effect on learning. We demonstrate the impact of parameter sharing methods on training speed and converged returns, establishing that when applied indiscriminately, their effectiveness is highly dependent on the environment. We propose a novel method to automatically identify agents which may benefit from sharing parameters by partitioning them based on their abilities and goals. Our approach combines the increased sample efficiency of parameter sharing with the representational capacity of multiple independent networks to reduce training time and increase final returns.}\n}", "pdf": "http://proceedings.mlr.press/v139/christianos21a/christianos21a.pdf", "supp": "", "pdf_size": 4669595, "gs_citation": 168, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5803292243518473578&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "School of Informatics, University of Edinburgh, Edinburgh, United Kingdom; School of Informatics, University of Edinburgh, Edinburgh, United Kingdom; School of Informatics, University of Edinburgh, Edinburgh, United Kingdom; School of Informatics, University of Edinburgh, Edinburgh, United Kingdom", "aff_domain": "ed.ac.uk; ; ; ", "email": "ed.ac.uk; ; ; ", "github": "https://github.com/uoe-agents/seps", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/christianos21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Edinburgh", "aff_unique_dep": "School of Informatics", "aff_unique_url": "https://www.ed.ac.uk", "aff_unique_abbr": "Edinburgh", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Edinburgh", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Scaling Properties of Deep Residual Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10287", "id": "10287", "proceeding": "http://proceedings.mlr.press/v139/cohen21b.html", "slides": "", "author_site": "Alain-Sam Cohen, Rama Cont, Alain Rossier, Renyuan Xu", "author": "Alain-Sam Cohen; Rama Cont; Alain Rossier; Renyuan Xu", "abstract": "Residual networks (ResNets) have displayed impressive results in pattern recognition and, 
recently, have garnered considerable theoretical interest due to a perceived link with neural ordinary differential equations (neural ODEs). This link relies on the convergence of network weights to a smooth function as the number of layers increases. We investigate the properties of weights trained by stochastic gradient descent and their scaling with network depth through detailed numerical experiments. We observe the existence of scaling regimes markedly different from those assumed in neural ODE literature. Depending on certain features of the network architecture, such as the smoothness of the activation function, one may obtain an alternative ODE limit, a stochastic differential equation or neither of these. These findings cast doubts on the validity of the neural ODE model as an adequate asymptotic description of deep ResNets and point to an alternative class of differential equations as a better description of the deep network limit.", "bibtex": "@InProceedings{pmlr-v139-cohen21b,\n title = \t {Scaling Properties of Deep Residual Networks},\n author = {Cohen, Alain-Sam and Cont, Rama and Rossier, Alain and Xu, Renyuan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2039--2048},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cohen21b/cohen21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/cohen21b.html},\n abstract = \t {Residual networks (ResNets) have displayed impressive results in pattern recognition and, recently, have garnered considerable theoretical interest due to a perceived link with neural ordinary differential equations (neural ODEs). This link relies on the convergence of network weights to a smooth function as the number of layers increases. We investigate the properties of weights trained by stochastic gradient descent and their scaling with network depth through detailed numerical experiments. We observe the existence of scaling regimes markedly different from those assumed in neural ODE literature. Depending on certain features of the network architecture, such as the smoothness of the activation function, one may obtain an alternative ODE limit, a stochastic differential equation or neither of these. 
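The depth-scaling question above can be probed numerically; the sketch below is a hypothetical diagnostic (not the paper's estimator) that fits a power-law exponent to how an assumed weight statistic shrinks with network depth.

```python
import numpy as np

def scaling_exponent(depths, weight_stats):
    """Fit weight_stat(L) ~ C * L**(-beta) by least squares in log-log space.
    `weight_stats` is a placeholder: e.g. a norm of successive weight
    differences measured in trained ResNets of depth L."""
    slope, _ = np.polyfit(np.log(depths), np.log(weight_stats), deg=1)
    return -slope

# Example: statistics decaying like L**(-0.5) recover an exponent near 0.5.
depths = np.array([8, 16, 32, 64, 128])
print(scaling_exponent(depths, depths ** -0.5))
```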
These findings cast doubts on the validity of the neural ODE model as an adequate asymptotic description of deep ResNets and point to an alternative class of differential equations as a better description of the deep network limit.}\n}", "pdf": "http://proceedings.mlr.press/v139/cohen21b/cohen21b.pdf", "supp": "", "pdf_size": 854469, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8302805439596916242&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "InstaDeep; Mathematical Institute, University of Oxford; Mathematical Institute, University of Oxford; Mathematical Institute, University of Oxford", "aff_domain": "maths.ox.ac.uk; ; ; ", "email": "maths.ox.ac.uk; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/cohen21b.html", "aff_unique_index": "0;1;1;1", "aff_unique_norm": "InstaDeep;University of Oxford", "aff_unique_dep": ";Mathematical Institute", "aff_unique_url": "https://www.instadeep.com;https://www.ox.ac.uk", "aff_unique_abbr": "InstaDeep;Oxford", "aff_campus_unique_index": "1;1;1", "aff_campus_unique": ";Oxford", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Scaling Up Visual and Vision-Language Representation Learning With Noisy Text Supervision", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10657", "id": "10657", "proceeding": "http://proceedings.mlr.press/v139/jia21b.html", "slides": "", "author_site": "Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, Tom Duerig", "author": "Chao Jia; Yinfei Yang; Ye Xia; Yi-Ting Chen; Zarana Parekh; Hieu Pham; Quoc Le; Yun-Hsuan Sung; Zhen Li; Tom Duerig", "abstract": "Pre-trained representations are becoming crucial for many NLP and perception tasks. While representation learning in NLP has transitioned to training on raw text without human annotations, visual and vision-language representations still rely heavily on curated training datasets that are expensive or require expert knowledge. For vision applications, representations are mostly learned using datasets with explicit class labels such as ImageNet or OpenImages. For vision-language, popular datasets like Conceptual Captions, MSCOCO, or CLIP all involve a non-trivial data collection (and cleaning) process. This costly curation process limits the size of datasets and hence hinders the scaling of trained models. In this paper, we leverage a noisy dataset of over one billion image alt-text pairs, obtained without expensive filtering or post-processing steps in the Conceptual Captions dataset. A simple dual-encoder architecture learns to align visual and language representations of the image and text pairs using a contrastive loss. We show that the scale of our corpus can make up for its noise and leads to state-of-the-art representations even with such a simple learning scheme. Our visual representation achieves strong performance when transferred to classification tasks such as ImageNet and VTAB. The aligned visual and language representations enables zero-shot image classification and also set new state-of-the-art results on Flickr30K and MSCOCO image-text retrieval benchmarks, even when compared with more sophisticated cross-attention models. 
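The dual-encoder training signal described here is a symmetric contrastive loss over image-text pairs; the snippet below is a generic InfoNCE-style sketch under assumed embeddings and temperature, not the paper's exact loss or training scale.

```python
import torch
import torch.nn.functional as F

def dual_encoder_contrastive(image_emb, text_emb, temperature=0.07):
    # Cosine-similarity logits between every image and every text in the batch.
    image_emb = F.normalize(image_emb, dim=-1)
    text_emb = F.normalize(text_emb, dim=-1)
    logits = image_emb @ text_emb.t() / temperature            # [B, B]
    targets = torch.arange(logits.size(0), device=logits.device)
    # Matched pairs sit on the diagonal; all other pairs act as negatives.
    return 0.5 * (F.cross_entropy(logits, targets) +
                  F.cross_entropy(logits.t(), targets))

loss = dual_encoder_contrastive(torch.randn(32, 256), torch.randn(32, 256))
```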
The representations also enable cross-modality search with complex text and text + image queries.", "bibtex": "@InProceedings{pmlr-v139-jia21b,\n title = \t {Scaling Up Visual and Vision-Language Representation Learning With Noisy Text Supervision},\n author = {Jia, Chao and Yang, Yinfei and Xia, Ye and Chen, Yi-Ting and Parekh, Zarana and Pham, Hieu and Le, Quoc and Sung, Yun-Hsuan and Li, Zhen and Duerig, Tom},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4904--4916},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jia21b/jia21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/jia21b.html},\n abstract = \t {Pre-trained representations are becoming crucial for many NLP and perception tasks. While representation learning in NLP has transitioned to training on raw text without human annotations, visual and vision-language representations still rely heavily on curated training datasets that are expensive or require expert knowledge. For vision applications, representations are mostly learned using datasets with explicit class labels such as ImageNet or OpenImages. For vision-language, popular datasets like Conceptual Captions, MSCOCO, or CLIP all involve a non-trivial data collection (and cleaning) process. This costly curation process limits the size of datasets and hence hinders the scaling of trained models. In this paper, we leverage a noisy dataset of over one billion image alt-text pairs, obtained without expensive filtering or post-processing steps in the Conceptual Captions dataset. A simple dual-encoder architecture learns to align visual and language representations of the image and text pairs using a contrastive loss. We show that the scale of our corpus can make up for its noise and leads to state-of-the-art representations even with such a simple learning scheme. Our visual representation achieves strong performance when transferred to classification tasks such as ImageNet and VTAB. The aligned visual and language representations enables zero-shot image classification and also set new state-of-the-art results on Flickr30K and MSCOCO image-text retrieval benchmarks, even when compared with more sophisticated cross-attention models. 
The representations also enable cross-modality search with complex text and text + image queries.}\n}", "pdf": "http://proceedings.mlr.press/v139/jia21b/jia21b.pdf", "supp": "", "pdf_size": 1918271, "gs_citation": 4467, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18080118691192431295&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Google Research; Google Research; Google Research; Google Research; Google Research; Google Research; Google Research; Google Research; Google Research; Google Research", "aff_domain": "google.com;google.com; ; ; ; ; ; ; ; ", "email": "google.com;google.com; ; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 10, "oa": "https://proceedings.mlr.press/v139/jia21b.html", "aff_unique_index": "0;0;0;0;0;0;0;0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google Research", "aff_campus_unique_index": "0;0;0;0;0;0;0;0;0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Segmenting Hybrid Trajectories using Latent ODEs", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9545", "id": "9545", "proceeding": "http://proceedings.mlr.press/v139/shi21c.html", "slides": "/media/icml-2021/Slides/9545.pdf", "author_site": "Ruian Shi, Quaid Morris", "author": "Ruian Shi; Quaid Morris", "abstract": "Smooth dynamics interrupted by discontinuities are known as hybrid systems and arise commonly in nature. Latent ODEs allow for powerful representation of irregularly sampled time series but are not designed to capture trajectories arising from hybrid systems. Here, we propose the Latent Segmented ODE (LatSegODE), which uses Latent ODEs to perform reconstruction and changepoint detection within hybrid trajectories featuring jump discontinuities and switching dynamical modes. Where it is possible to train a Latent ODE on the smooth dynamical flows between discontinuities, we apply the pruned exact linear time (PELT) algorithm to detect changepoints where latent dynamics restart, thereby maximizing the joint probability of a piece-wise continuous latent dynamical representation. We propose usage of the marginal likelihood as a score function for PELT, circumventing the need for model-complexity-based penalization. The LatSegODE outperforms baselines in reconstructive and segmentation tasks including synthetic data sets of sine waves, Lotka Volterra dynamics, and UCI Character Trajectories.", "bibtex": "@InProceedings{pmlr-v139-shi21c,\n title = \t {Segmenting Hybrid Trajectories using Latent ODEs},\n author = {Shi, Ruian and Morris, Quaid},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9569--9579},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/shi21c/shi21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/shi21c.html},\n abstract = \t {Smooth dynamics interrupted by discontinuities are known as hybrid systems and arise commonly in nature. Latent ODEs allow for powerful representation of irregularly sampled time series but are not designed to capture trajectories arising from hybrid systems. 
Here, we propose the Latent Segmented ODE (LatSegODE), which uses Latent ODEs to perform reconstruction and changepoint detection within hybrid trajectories featuring jump discontinuities and switching dynamical modes. Where it is possible to train a Latent ODE on the smooth dynamical flows between discontinuities, we apply the pruned exact linear time (PELT) algorithm to detect changepoints where latent dynamics restart, thereby maximizing the joint probability of a piece-wise continuous latent dynamical representation. We propose usage of the marginal likelihood as a score function for PELT, circumventing the need for model-complexity-based penalization. The LatSegODE outperforms baselines in reconstructive and segmentation tasks including synthetic data sets of sine waves, Lotka Volterra dynamics, and UCI Character Trajectories.}\n}", "pdf": "http://proceedings.mlr.press/v139/shi21c/shi21c.pdf", "supp": "", "pdf_size": 2098822, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3100416464972434904&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "University of Toronto + Vector Institute, Toronto; University of Toronto + Vector Institute, Toronto + Memorial Sloan Kettering Cancer Center", "aff_domain": "mail.utoronto.ca; ", "email": "mail.utoronto.ca; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/shi21c.html", "aff_unique_index": "0+1;0+1+2", "aff_unique_norm": "University of Toronto;Vector Institute;Memorial Sloan Kettering Cancer Center", "aff_unique_dep": ";;", "aff_unique_url": "https://www.utoronto.ca;https://vectorinstitute.ai;https://www.mskcc.org", "aff_unique_abbr": "U of T;Vector Institute;MSKCC", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Toronto", "aff_country_unique_index": "0+0;0+0+1", "aff_country_unique": "Canada;United States" }, { "title": "Selecting Data Augmentation for Simulating Interventions", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8887", "id": "8887", "proceeding": "http://proceedings.mlr.press/v139/ilse21a.html", "slides": "/media/icml-2021/Slides/8887.pdf", "author_site": "Maximilian Ilse, Jakub Tomczak, Patrick Forr\u00e9", "author": "Maximilian Ilse; Jakub M Tomczak; Patrick Forr\u00e9", "abstract": "Machine learning models trained with purely observational data and the principle of empirical risk minimization (Vapnik 1992) can fail to generalize to unseen domains. In this paper, we focus on the case where the problem arises through spurious correlation between the observed domains and the actual task labels. We find that many domain generalization methods do not explicitly take this spurious correlation into account. Instead, especially in more application-oriented research areas like medical imaging or robotics, data augmentation techniques that are based on heuristics are used to learn domain invariant features. To bridge the gap between theory and practice, we develop a causal perspective on the problem of domain generalization. We argue that causal concepts can be used to explain the success of data augmentation by describing how they can weaken the spurious correlation between the observed domains and the task labels. We demonstrate that data augmentation can serve as a tool for simulating interventional data. 
We use these theoretical insights to derive a simple algorithm that is able to select data augmentation techniques that will lead to better domain generalization.", "bibtex": "@InProceedings{pmlr-v139-ilse21a,\n title = \t {Selecting Data Augmentation for Simulating Interventions},\n author = {Ilse, Maximilian and Tomczak, Jakub M and Forr{\\'e}, Patrick},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4555--4562},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ilse21a/ilse21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ilse21a.html},\n abstract = \t {Machine learning models trained with purely observational data and the principle of empirical risk minimization (Vapnik 1992) can fail to generalize to unseen domains. In this paper, we focus on the case where the problem arises through spurious correlation between the observed domains and the actual task labels. We find that many domain generalization methods do not explicitly take this spurious correlation into account. Instead, especially in more application-oriented research areas like medical imaging or robotics, data augmentation techniques that are based on heuristics are used to learn domain invariant features. To bridge the gap between theory and practice, we develop a causal perspective on the problem of domain generalization. We argue that causal concepts can be used to explain the success of data augmentation by describing how they can weaken the spurious correlation between the observed domains and the task labels. We demonstrate that data augmentation can serve as a tool for simulating interventional data. We use these theoretical insights to derive a simple algorithm that is able to select data augmentation techniques that will lead to better domain generalization.}\n}", "pdf": "http://proceedings.mlr.press/v139/ilse21a/ilse21a.pdf", "supp": "", "pdf_size": 420866, "gs_citation": 78, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3812556752145273819&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Amsterdam Machine Learning Lab, University of Amsterdam; Computational Intelligence Group, Vrije Universiteit Amsterdam; Amsterdam Machine Learning Lab, University of Amsterdam", "aff_domain": "uva.nl; ; ", "email": "uva.nl; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/ilse21a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Amsterdam;Vrije Universiteit Amsterdam", "aff_unique_dep": "Amsterdam Machine Learning Lab;Computational Intelligence Group", "aff_unique_url": "https://www.uva.nl;https://www.vu.nl", "aff_unique_abbr": "UvA;VU Amsterdam", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Amsterdam;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Netherlands" }, { "title": "Self Normalizing Flows", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10717", "id": "10717", "proceeding": "http://proceedings.mlr.press/v139/keller21a.html", "slides": "/media/icml-2021/Slides/10717.pdf", "author_site": "T. Anderson Keller, Jorn Peters, Priyank Jaini, Emiel Hoogeboom, Patrick Forr\u00e9, Max Welling", "author": "Thomas A Keller; Jorn W.T. 
Peters; Priyank Jaini; Emiel Hoogeboom; Patrick Forr\u00e9; Max Welling", "abstract": "Efficient gradient computation of the Jacobian determinant term is a core problem in many machine learning settings, and especially so in the normalizing flow framework. Most proposed flow models therefore either restrict to a function class with easy evaluation of the Jacobian determinant, or an efficient estimator thereof. However, these restrictions limit the performance of such density models, frequently requiring significant depth to reach desired performance levels. In this work, we propose \\emph{Self Normalizing Flows}, a flexible framework for training normalizing flows by replacing expensive terms in the gradient by learned approximate inverses at each layer. This reduces the computational complexity of each layer\u2019s exact update from $\\mathcal{O}(D^3)$ to $\\mathcal{O}(D^2)$, allowing for the training of flow architectures which were otherwise computationally infeasible, while also providing efficient sampling. We show experimentally that such models are remarkably stable and optimize to similar data likelihood values as their exact gradient counterparts, while training more quickly and surpassing the performance of functionally constrained counterparts.", "bibtex": "@InProceedings{pmlr-v139-keller21a,\n title = \t {Self Normalizing Flows},\n author = {Keller, Thomas A and Peters, Jorn W.T. and Jaini, Priyank and Hoogeboom, Emiel and Forr{\\'e}, Patrick and Welling, Max},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5378--5387},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/keller21a/keller21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/keller21a.html},\n abstract = \t {Efficient gradient computation of the Jacobian determinant term is a core problem in many machine learning settings, and especially so in the normalizing flow framework. Most proposed flow models therefore either restrict to a function class with easy evaluation of the Jacobian determinant, or an efficient estimator thereof. However, these restrictions limit the performance of such density models, frequently requiring significant depth to reach desired performance levels. In this work, we propose \\emph{Self Normalizing Flows}, a flexible framework for training normalizing flows by replacing expensive terms in the gradient by learned approximate inverses at each layer. This reduces the computational complexity of each layer\u2019s exact update from $\\mathcal{O}(D^3)$ to $\\mathcal{O}(D^2)$, allowing for the training of flow architectures which were otherwise computationally infeasible, while also providing efficient sampling. 
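A heavily simplified sketch of the self-normalizing idea described above: pair each weight matrix W with a learned approximate inverse R, keep R close to W^{-1} through a reconstruction penalty, and use R as a cheap stand-in for the inverse appearing in the exact log-determinant gradient. Layer shape, penalty, and initialization below are assumptions, not the authors' code.

```python
import torch
import torch.nn as nn

class SelfNormLinear(nn.Module):
    """Linear flow layer with a learned approximate inverse (sketch only)."""
    def __init__(self, dim):
        super().__init__()
        self.W = nn.Parameter(torch.eye(dim) + 0.01 * torch.randn(dim, dim))
        self.R = nn.Parameter(torch.eye(dim) + 0.01 * torch.randn(dim, dim))

    def forward(self, x):
        z = x @ self.W.t()
        x_rec = z @ self.R.t()                 # approximate inverse pass
        recon = ((x_rec - x) ** 2).mean()      # pushes R towards W^{-1}
        return z, recon

    def approx_logdet_grad(self):
        # The exact gradient of log|det W| w.r.t. W is W^{-T} (cubic cost);
        # the learned inverse provides a quadratic-cost surrogate.
        return self.R.t()
```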
We show experimentally that such models are remarkably stable and optimize to similar data likelihood values as their exact gradient counterparts, while training more quickly and surpassing the performance of functionally constrained counterparts.}\n}", "pdf": "http://proceedings.mlr.press/v139/keller21a/keller21a.pdf", "supp": "", "pdf_size": 3628325, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16907220136527385464&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "UvA-Bosch Delta Lab+University of Amsterdam, Netherlands; UvA-Bosch Delta Lab+University of Amsterdam, Netherlands; UvA-Bosch Delta Lab+University of Amsterdam, Netherlands; UvA-Bosch Delta Lab+University of Amsterdam, Netherlands; University of Amsterdam, Netherlands; University of Amsterdam, Netherlands", "aff_domain": "gmail.com; ; ; ; ; ", "email": "gmail.com; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/keller21a.html", "aff_unique_index": "0+0;0+0;0+0;0+0;0;0", "aff_unique_norm": "University of Amsterdam", "aff_unique_dep": "Bosch Delta Lab", "aff_unique_url": "https://www.uva.nl", "aff_unique_abbr": "UvA", "aff_campus_unique_index": ";;;", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0+0;0+0;0+0;0;0", "aff_country_unique": "Netherlands" }, { "title": "Self-Damaging Contrastive Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10081", "id": "10081", "proceeding": "http://proceedings.mlr.press/v139/jiang21a.html", "slides": "/media/icml-2021/Slides/10081.pdf", "author_site": "Ziyu Jiang, Tianlong Chen, Bobak Mortazavi, Zhangyang \u201cAtlas\u201d Wang", "author": "Ziyu Jiang; Tianlong Chen; Bobak J Mortazavi; Zhangyang Wang", "abstract": "The recent breakthrough achieved by contrastive learning accelerates the pace for deploying unsupervised training on real-world data applications. However, unlabeled data in reality is commonly imbalanced and shows a long-tail distribution, and it is unclear how robustly the latest contrastive learning methods could perform in the practical scenario. This paper proposes to explicitly tackle this challenge, via a principled framework called Self-Damaging Contrastive Learning (SDCLR), to automatically balance the representation learning without knowing the classes. Our main inspiration is drawn from the recent finding that deep models have difficult-to-memorize samples, and those may be exposed through network pruning. It is further natural to hypothesize that long-tail samples are also tougher for the model to learn well due to insufficient examples. Hence, the key innovation in SDCLR is to create a dynamic self-competitor model to contrast with the target model, which is a pruned version of the latter. During training, contrasting the two models will lead to adaptive online mining of the most easily forgotten samples for the current target model, and implicitly emphasize them more in the contrastive loss. Extensive experiments across multiple datasets and imbalance settings show that SDCLR significantly improves not only overall accuracies but also balancedness, in terms of linear evaluation on the full-shot and few-shot settings. 
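The self-competitor in SDCLR is a pruned copy of the target encoder; the helper below sketches only that step (global magnitude pruning of a deep copy), with the sparsity level and the surrounding contrastive training loop left as assumptions.

```python
import copy
import torch

def pruned_self_competitor(encoder, sparsity=0.9):
    """Return a copy of `encoder` with its smallest-magnitude weights zeroed
    (illustrative sketch; not the full SDCLR training procedure)."""
    competitor = copy.deepcopy(encoder)
    with torch.no_grad():
        for p in competitor.parameters():
            if p.dim() > 1:                      # prune weight matrices only
                k = int(sparsity * p.numel())
                if k > 0:
                    threshold = p.abs().flatten().kthvalue(k).values
                    p.mul_((p.abs() > threshold).to(p.dtype))
    return competitor
```

Contrasting views encoded by `encoder` and by the returned competitor would then, per the abstract, emphasize the samples the pruned model most easily forgets.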
Our code is available at https://github.com/VITA-Group/SDCLR.", "bibtex": "@InProceedings{pmlr-v139-jiang21a,\n title = \t {Self-Damaging Contrastive Learning},\n author = {Jiang, Ziyu and Chen, Tianlong and Mortazavi, Bobak J and Wang, Zhangyang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4927--4939},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jiang21a/jiang21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/jiang21a.html},\n abstract = \t {The recent breakthrough achieved by contrastive learning accelerates the pace for deploying unsupervised training on real-world data applications. However, unlabeled data in reality is commonly imbalanced and shows a long-tail distribution, and it is unclear how robustly the latest contrastive learning methods could perform in the practical scenario. This paper proposes to explicitly tackle this challenge, via a principled framework called Self-Damaging Contrastive Learning (SDCLR), to automatically balance the representation learning without knowing the classes. Our main inspiration is drawn from the recent finding that deep models have difficult-to-memorize samples, and those may be exposed through network pruning. It is further natural to hypothesize that long-tail samples are also tougher for the model to learn well due to insufficient examples. Hence, the key innovation in SDCLR is to create a dynamic self-competitor model to contrast with the target model, which is a pruned version of the latter. During training, contrasting the two models will lead to adaptive online mining of the most easily forgotten samples for the current target model, and implicitly emphasize them more in the contrastive loss. Extensive experiments across multiple datasets and imbalance settings show that SDCLR significantly improves not only overall accuracies but also balancedness, in terms of linear evaluation on the full-shot and few-shot settings. 
Our code is available at https://github.com/VITA-Group/SDCLR.}\n}", "pdf": "http://proceedings.mlr.press/v139/jiang21a/jiang21a.pdf", "supp": "", "pdf_size": 2917596, "gs_citation": 81, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16794370267246676640&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Texas A&M University; University of Texas at Austin; Texas A&M University; University of Texas at Austin", "aff_domain": "tamu.edu;utexas.edu;tamu.edu;utexas.edu", "email": "tamu.edu;utexas.edu;tamu.edu;utexas.edu", "github": "https://github.com/VITA-Group/SDCLR", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/jiang21a.html", "aff_unique_index": "0;1;0;1", "aff_unique_norm": "Texas A&M University;University of Texas at Austin", "aff_unique_dep": ";", "aff_unique_url": "https://www.tamu.edu;https://www.utexas.edu", "aff_unique_abbr": "TAMU;UT Austin", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Austin", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Self-Improved Retrosynthetic Planning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10749", "id": "10749", "proceeding": "http://proceedings.mlr.press/v139/kim21b.html", "slides": "/media/icml-2021/Slides/10749.pdf", "author_site": "Junsu Kim, Sungsoo Ahn, Hankook Lee, Jinwoo Shin", "author": "Junsu Kim; Sungsoo Ahn; Hankook Lee; Jinwoo Shin", "abstract": "Retrosynthetic planning is a fundamental problem in chemistry for finding a pathway of reactions to synthesize a target molecule. Recently, search algorithms have shown promising results for solving this problem by using deep neural networks (DNNs) to expand their candidate solutions, i.e., adding new reactions to reaction pathways. However, the existing works on this line are suboptimal; the retrosynthetic planning problem requires the reaction pathways to be (a) represented by real-world reactions and (b) executable using \u201cbuilding block\u201d molecules, yet the DNNs expand reaction pathways without fully incorporating such requirements. Motivated by this, we propose an end-to-end framework for directly training the DNNs towards generating reaction pathways with the desirable properties. Our main idea is based on a self-improving procedure that trains the model to imitate successful trajectories found by itself. We also propose a novel reaction augmentation scheme based on a forward reaction model. Our experiments demonstrate that our scheme significantly improves the success rate of solving the retrosynthetic problem from 86.84% to 96.32% while maintaining the performance of DNN for predicting valid reactions.", "bibtex": "@InProceedings{pmlr-v139-kim21b,\n title = \t {Self-Improved Retrosynthetic Planning},\n author = {Kim, Junsu and Ahn, Sungsoo and Lee, Hankook and Shin, Jinwoo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5486--5495},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kim21b/kim21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/kim21b.html},\n abstract = \t {Retrosynthetic planning is a fundamental problem in chemistry for finding a pathway of reactions to synthesize a target molecule. 
Recently, search algorithms have shown promising results for solving this problem by using deep neural networks (DNNs) to expand their candidate solutions, i.e., adding new reactions to reaction pathways. However, the existing works on this line are suboptimal; the retrosynthetic planning problem requires the reaction pathways to be (a) represented by real-world reactions and (b) executable using \u201cbuilding block\u201d molecules, yet the DNNs expand reaction pathways without fully incorporating such requirements. Motivated by this, we propose an end-to-end framework for directly training the DNNs towards generating reaction pathways with the desirable properties. Our main idea is based on a self-improving procedure that trains the model to imitate successful trajectories found by itself. We also propose a novel reaction augmentation scheme based on a forward reaction model. Our experiments demonstrate that our scheme significantly improves the success rate of solving the retrosynthetic problem from 86.84% to 96.32% while maintaining the performance of DNN for predicting valid reactions.}\n}", "pdf": "http://proceedings.mlr.press/v139/kim21b/kim21b.pdf", "supp": "", "pdf_size": 813547, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18216216524696929776&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Korea Advanced Institute of Science and Technology (KAIST)+Mohamed bin Zayed University of Artificial Intelligence (MBZUAI); Mohamed bin Zayed University of Artificial Intelligence (MBZUAI); Korea Advanced Institute of Science and Technology (KAIST); Korea Advanced Institute of Science and Technology (KAIST)", "aff_domain": "kaist.ac.kr; ; ; ", "email": "kaist.ac.kr; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/kim21b.html", "aff_unique_index": "0+1;1;0;0", "aff_unique_norm": "Korea Advanced Institute of Science and Technology;Mohamed bin Zayed University of Artificial Intelligence", "aff_unique_dep": ";", "aff_unique_url": "https://www.kaist.ac.kr;https://www.mbzuai.ac.ae", "aff_unique_abbr": "KAIST;MBZUAI", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+1;1;0;0", "aff_country_unique": "South Korea;United Arab Emirates" }, { "title": "Self-Paced Context Evaluation for Contextual Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9685", "id": "9685", "proceeding": "http://proceedings.mlr.press/v139/eimer21a.html", "slides": "/media/icml-2021/Slides/9685.pdf", "author_site": "Theresa Eimer, Andr\u00e9 Biedenkapp, Frank Hutter, Marius Lindauer", "author": "Theresa Eimer; Andr\u00e9 Biedenkapp; Frank Hutter; Marius Lindauer", "abstract": "Reinforcement learning (RL) has made a lot of advances for solving a single problem in a given environment; but learning policies that generalize to unseen variations of a problem remains challenging. To improve sample efficiency for learning on such instances of a problem domain, we present Self-Paced Context Evaluation (SPaCE). Based on self-paced learning, SPaCE automatically generates instance curricula online with little computational overhead. To this end, SPaCE leverages information contained in state values during training to accelerate and improve training performance as well as generalization capabilities to new \\tasks from the same problem domain. 
Nevertheless, SPaCE is independent of the problem domain at hand and can be applied on top of any RL agent with state-value function approximation. We demonstrate SPaCE\u2019s ability to speed up learning of different value-based RL agents on two environments, showing better generalization capabilities and up to 10x faster learning compared to naive approaches such as round robin or SPDRL, as the closest state-of-the-art approach.", "bibtex": "@InProceedings{pmlr-v139-eimer21a,\n title = \t {Self-Paced Context Evaluation for Contextual Reinforcement Learning},\n author = {Eimer, Theresa and Biedenkapp, Andr{\\'e} and Hutter, Frank and Lindauer, Marius},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2948--2958},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/eimer21a/eimer21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/eimer21a.html},\n abstract = \t {Reinforcement learning (RL) has made a lot of advances for solving a single problem in a given environment; but learning policies that generalize to unseen variations of a problem remains challenging. To improve sample efficiency for learning on such instances of a problem domain, we present Self-Paced Context Evaluation (SPaCE). Based on self-paced learning, SPaCE automatically generates instance curricula online with little computational overhead. To this end, SPaCE leverages information contained in state values during training to accelerate and improve training performance as well as generalization capabilities to new \\tasks from the same problem domain. Nevertheless, SPaCE is independent of the problem domain at hand and can be applied on top of any RL agent with state-value function approximation. 
We demonstrate SPaCE\u2019s ability to speed up learning of different value-based RL agents on two environments, showing better generalization capabilities and up to 10x faster learning compared to naive approaches such as round robin or SPDRL, as the closest state-of-the-art approach.}\n}", "pdf": "http://proceedings.mlr.press/v139/eimer21a/eimer21a.pdf", "supp": "", "pdf_size": 984865, "gs_citation": 34, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18295369493204614247&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Information Processing Institute (tnt), Leibniz University Hannover, Germany; Department of Computer Science, University of Freiburg, Germany; Department of Computer Science, University of Freiburg, Germany + Bosch Center for Arti\ufb01cial Intelli-gence, Renningen, Germany; Information Processing Institute (tnt), Leibniz University Hannover, Germany", "aff_domain": "tnt.uni-hannover.de; ; ; ", "email": "tnt.uni-hannover.de; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/eimer21a.html", "aff_unique_index": "0;1;1+2;0", "aff_unique_norm": "Leibniz University Hannover;University of Freiburg;Bosch Center for Artificial Intelligence", "aff_unique_dep": "Information Processing Institute;Department of Computer Science;Center for Artificial Intelligence", "aff_unique_url": "https://www.leibniz-university-hannover.de;https://www.uni-freiburg.de;https://www.bosch-ai.com", "aff_unique_abbr": ";;BCAI", "aff_campus_unique_index": "1", "aff_campus_unique": ";Renningen", "aff_country_unique_index": "0;0;0+0;0", "aff_country_unique": "Germany" }, { "title": "Self-Tuning for Data-Efficient Deep Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8615", "id": "8615", "proceeding": "http://proceedings.mlr.press/v139/wang21g.html", "slides": "/media/icml-2021/Slides/8615.pdf", "author_site": "Ximei Wang, Jinghan Gao, Mingsheng Long, Jianmin Wang", "author": "Ximei Wang; Jinghan Gao; Mingsheng Long; Jianmin Wang", "abstract": "Deep learning has made revolutionary advances to diverse applications in the presence of large-scale labeled datasets. However, it is prohibitively time-costly and labor-expensive to collect sufficient labeled data in most realistic scenarios. To mitigate the requirement for labeled data, semi-supervised learning (SSL) focuses on simultaneously exploring both labeled and unlabeled data, while transfer learning (TL) popularizes a favorable practice of fine-tuning a pre-trained model to the target data. A dilemma is thus encountered: Without a decent pre-trained model to provide an implicit regularization, SSL through self-training from scratch will be easily misled by inaccurate pseudo-labels, especially in large-sized label space; Without exploring the intrinsic structure of unlabeled data, TL through fine-tuning from limited labeled data is at risk of under-transfer caused by model shift. To escape from this dilemma, we present Self-Tuning to enable data-efficient deep learning by unifying the exploration of labeled and unlabeled data and the transfer of a pre-trained model, as well as a Pseudo Group Contrast (PGC) mechanism to mitigate the reliance on pseudo-labels and boost the tolerance to false labels. Self-Tuning outperforms its SSL and TL counterparts on five tasks by sharp margins, e.g. 
it doubles the accuracy of fine-tuning on Cars with $15%$ labels.", "bibtex": "@InProceedings{pmlr-v139-wang21g,\n title = \t {Self-Tuning for Data-Efficient Deep Learning},\n author = {Wang, Ximei and Gao, Jinghan and Long, Mingsheng and Wang, Jianmin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10738--10748},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21g/wang21g.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21g.html},\n abstract = \t {Deep learning has made revolutionary advances to diverse applications in the presence of large-scale labeled datasets. However, it is prohibitively time-costly and labor-expensive to collect sufficient labeled data in most realistic scenarios. To mitigate the requirement for labeled data, semi-supervised learning (SSL) focuses on simultaneously exploring both labeled and unlabeled data, while transfer learning (TL) popularizes a favorable practice of fine-tuning a pre-trained model to the target data. A dilemma is thus encountered: Without a decent pre-trained model to provide an implicit regularization, SSL through self-training from scratch will be easily misled by inaccurate pseudo-labels, especially in large-sized label space; Without exploring the intrinsic structure of unlabeled data, TL through fine-tuning from limited labeled data is at risk of under-transfer caused by model shift. To escape from this dilemma, we present Self-Tuning to enable data-efficient deep learning by unifying the exploration of labeled and unlabeled data and the transfer of a pre-trained model, as well as a Pseudo Group Contrast (PGC) mechanism to mitigate the reliance on pseudo-labels and boost the tolerance to false labels. Self-Tuning outperforms its SSL and TL counterparts on five tasks by sharp margins, e.g. 
it doubles the accuracy of fine-tuning on Cars with $15%$ labels.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21g/wang21g.pdf", "supp": "", "pdf_size": 652664, "gs_citation": 76, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3161082086338934038&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "School of Software, BNRist, Tsinghua University, Beijing, China, 100084; School of Software, BNRist, Tsinghua University, Beijing, China, 100084; School of Software, BNRist, Tsinghua University, Beijing, China, 100084; School of Software, BNRist, Tsinghua University, Beijing, China, 100084", "aff_domain": "mails.tsinghua.edu.cn; ;tsinghua.edu.cn; ", "email": "mails.tsinghua.edu.cn; ;tsinghua.edu.cn; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/wang21g.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Tsinghua University", "aff_unique_dep": "School of Software", "aff_unique_url": "https://www.tsinghua.edu.cn", "aff_unique_abbr": "THU", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Beijing", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "China" }, { "title": "Self-supervised Graph-level Representation Learning with Local and Global Structure", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10691", "id": "10691", "proceeding": "http://proceedings.mlr.press/v139/xu21g.html", "slides": "/media/icml-2021/Slides/10691.pdf", "author_site": "Minghao Xu, Hang Wang, Bingbing Ni, Hongyu Guo, Jian Tang", "author": "Minghao Xu; Hang Wang; Bingbing Ni; Hongyu Guo; Jian Tang", "abstract": "This paper studies unsupervised/self-supervised whole-graph representation learning, which is critical in many tasks such as molecule properties prediction in drug and material discovery. Existing methods mainly focus on preserving the local similarity structure between different graph instances but fail to discover the global semantic structure of the entire data set. In this paper, we propose a unified framework called Local-instance and Global-semantic Learning (GraphLoG) for self-supervised whole-graph representation learning. Specifically, besides preserving the local similarities, GraphLoG introduces the hierarchical prototypes to capture the global semantic clusters. An efficient online expectation-maximization (EM) algorithm is further developed for learning the model. We evaluate GraphLoG by pre-training it on massive unlabeled graphs followed by fine-tuning on downstream tasks. Extensive experiments on both chemical and biological benchmark data sets demonstrate the effectiveness of the proposed approach.", "bibtex": "@InProceedings{pmlr-v139-xu21g,\n title = \t {Self-supervised Graph-level Representation Learning with Local and Global Structure},\n author = {Xu, Minghao and Wang, Hang and Ni, Bingbing and Guo, Hongyu and Tang, Jian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11548--11558},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xu21g/xu21g.pdf},\n url = \t {https://proceedings.mlr.press/v139/xu21g.html},\n abstract = \t {This paper studies unsupervised/self-supervised whole-graph representation learning, which is critical in many tasks such as molecule properties prediction in drug and material discovery. 
Existing methods mainly focus on preserving the local similarity structure between different graph instances but fail to discover the global semantic structure of the entire data set. In this paper, we propose a unified framework called Local-instance and Global-semantic Learning (GraphLoG) for self-supervised whole-graph representation learning. Specifically, besides preserving the local similarities, GraphLoG introduces the hierarchical prototypes to capture the global semantic clusters. An efficient online expectation-maximization (EM) algorithm is further developed for learning the model. We evaluate GraphLoG by pre-training it on massive unlabeled graphs followed by fine-tuning on downstream tasks. Extensive experiments on both chemical and biological benchmark data sets demonstrate the effectiveness of the proposed approach.}\n}", "pdf": "http://proceedings.mlr.press/v139/xu21g/xu21g.pdf", "supp": "", "pdf_size": 1088368, "gs_citation": 268, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15360735332012817623&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; National Research Council Canada; Mila - Quebec AI Institute + CIFAR AI Research Chair + HEC Montr \u00b4eal", "aff_domain": "sjtu.edu.cn; ; ; ;hec.ca", "email": "sjtu.edu.cn; ; ; ;hec.ca", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/xu21g.html", "aff_unique_index": "0;0;0;1;2+3+4", "aff_unique_norm": "Shanghai Jiao Tong University;National Research Council Canada;Quebec AI Institute;CIFAR;HEC Montr\u00e9al", "aff_unique_dep": ";;AI Institute;AI Research;", "aff_unique_url": "https://www.sjtu.edu.cn;https://www.nrc-cnrc.gc.ca;https://mila.quebec;https://www.cifar.ca;https://www.hec.ca", "aff_unique_abbr": "SJTU;NRC-CNRC;Mila;CIFAR;HEC", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;1;1+1+1", "aff_country_unique": "China;Canada" }, { "title": "Self-supervised and Supervised Joint Training for Resource-rich Machine Translation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9673", "id": "9673", "proceeding": "http://proceedings.mlr.press/v139/cheng21b.html", "slides": "", "author_site": "Yong Cheng, Wei Wang, Lu Jiang, Wolfgang Macherey", "author": "Yong Cheng; Wei Wang; Lu Jiang; Wolfgang Macherey", "abstract": "Self-supervised pre-training of text representations has been successfully applied to low-resource Neural Machine Translation (NMT). However, it usually fails to achieve notable gains on resource-rich NMT. In this paper, we propose a joint training approach, F2-XEnDec, to combine self-supervised and supervised learning to optimize NMT models. To exploit complementary self-supervised signals for supervised learning, NMT models are trained on examples that are interbred from monolingual and parallel sentences through a new process called crossover encoder-decoder. Experiments on two resource-rich translation benchmarks, WMT\u201914 English-German and WMT\u201914 English-French, demonstrate that our approach achieves substantial improvements over several strong baseline methods and obtains a new state of the art of 46.19 BLEU on English-French when incorporating back translation. 
Results also show that our approach is capable of improving model robustness to input perturbations such as code-switching noise which frequently appears on the social media.", "bibtex": "@InProceedings{pmlr-v139-cheng21b,\n title = \t {Self-supervised and Supervised Joint Training for Resource-rich Machine Translation},\n author = {Cheng, Yong and Wang, Wei and Jiang, Lu and Macherey, Wolfgang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1825--1835},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cheng21b/cheng21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/cheng21b.html},\n abstract = \t {Self-supervised pre-training of text representations has been successfully applied to low-resource Neural Machine Translation (NMT). However, it usually fails to achieve notable gains on resource-rich NMT. In this paper, we propose a joint training approach, F2-XEnDec, to combine self-supervised and supervised learning to optimize NMT models. To exploit complementary self-supervised signals for supervised learning, NMT models are trained on examples that are interbred from monolingual and parallel sentences through a new process called crossover encoder-decoder. Experiments on two resource-rich translation benchmarks, WMT\u201914 English-German and WMT\u201914 English-French, demonstrate that our approach achieves substantial improvements over several strong baseline methods and obtains a new state of the art of 46.19 BLEU on English-French when incorporating back translation. Results also show that our approach is capable of improving model robustness to input perturbations such as code-switching noise which frequently appears on the social media.}\n}", "pdf": "http://proceedings.mlr.press/v139/cheng21b/cheng21b.pdf", "supp": "", "pdf_size": 562341, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=157816153944296093&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Google Research, Google LLC, USA+Language Technologies Institute, Carnegie Mellon University, Pittsburgh, Pennsylvania; Google Research; Google Research, Google LLC, USA; Google Research", "aff_domain": "google.com; ; ; ", "email": "google.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/cheng21b.html", "aff_unique_index": "0+1;0;0;0", "aff_unique_norm": "Google;Carnegie Mellon University", "aff_unique_dep": "Google Research;Language Technologies Institute", "aff_unique_url": "https://research.google;https://www.cmu.edu", "aff_unique_abbr": "Google;CMU", "aff_campus_unique_index": "0+1;0;0;0", "aff_campus_unique": "Mountain View;Pittsburgh", "aff_country_unique_index": "0+0;0;0;0", "aff_country_unique": "United States" }, { "title": "Selfish Sparse RNN Training", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9375", "id": "9375", "proceeding": "http://proceedings.mlr.press/v139/liu21p.html", "slides": "", "author_site": "Shiwei Liu, Decebal Mocanu, Yulong Pei, Mykola Pechenizkiy", "author": "Shiwei Liu; Decebal Constantin Mocanu; Yulong Pei; Mykola Pechenizkiy", "abstract": "Sparse neural networks have been widely applied to reduce the computational demands of training and deploying over-parameterized deep neural networks. 
For inference acceleration, methods that discover a sparse network from a pre-trained dense network (dense-to-sparse training) work effectively. Recently, dynamic sparse training (DST) has been proposed to train sparse neural networks without pre-training a dense model (sparse-to-sparse training), so that the training process can also be accelerated. However, previous sparse-to-sparse methods mainly focus on Multilayer Perceptron Networks (MLPs) and Convolutional Neural Networks (CNNs), failing to match the performance of dense-to-sparse methods in the Recurrent Neural Networks (RNNs) setting. In this paper, we propose an approach to train intrinsically sparse RNNs with a fixed parameter count in one single run, without compromising performance. During training, we allow RNN layers to have a non-uniform redistribution across cell gates for better regularization. Further, we propose SNT-ASGD, a novel variant of the averaged stochastic gradient optimizer, which significantly improves the performance of all sparse training methods for RNNs. Using these strategies, we achieve state-of-the-art sparse training results, better than the dense-to-sparse methods, with various types of RNNs on Penn TreeBank and Wikitext-2 datasets. Our codes are available at https://github.com/Shiweiliuiiiiiii/Selfish-RNN.", "bibtex": "@InProceedings{pmlr-v139-liu21p,\n title = \t {Selfish Sparse RNN Training},\n author = {Liu, Shiwei and Mocanu, Decebal Constantin and Pei, Yulong and Pechenizkiy, Mykola},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6893--6904},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21p/liu21p.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21p.html},\n abstract = \t {Sparse neural networks have been widely applied to reduce the computational demands of training and deploying over-parameterized deep neural networks. For inference acceleration, methods that discover a sparse network from a pre-trained dense network (dense-to-sparse training) work effectively. Recently, dynamic sparse training (DST) has been proposed to train sparse neural networks without pre-training a dense model (sparse-to-sparse training), so that the training process can also be accelerated. However, previous sparse-to-sparse methods mainly focus on Multilayer Perceptron Networks (MLPs) and Convolutional Neural Networks (CNNs), failing to match the performance of dense-to-sparse methods in the Recurrent Neural Networks (RNNs) setting. In this paper, we propose an approach to train intrinsically sparse RNNs with a fixed parameter count in one single run, without compromising performance. During training, we allow RNN layers to have a non-uniform redistribution across cell gates for better regularization. Further, we propose SNT-ASGD, a novel variant of the averaged stochastic gradient optimizer, which significantly improves the performance of all sparse training methods for RNNs. Using these strategies, we achieve state-of-the-art sparse training results, better than the dense-to-sparse methods, with various types of RNNs on Penn TreeBank and Wikitext-2 datasets. 
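A generic drop-and-grow update of the kind dynamic sparse training relies on is sketched below; Selfish-RNN's cross-gate parameter redistribution and the SNT-ASGD optimizer are not reproduced, and the drop fraction, mask convention, and random regrowth are assumptions.

```python
import torch

def drop_and_grow(weight, mask, drop_frac=0.3):
    """One drop-and-grow step on a 2-D weight and its 0/1 float mask:
    remove the weakest active connections, then regrow as many at random
    inactive positions (a fuller version would exclude just-dropped ones)."""
    active = mask.nonzero(as_tuple=False)
    n_drop = int(drop_frac * active.size(0))
    if n_drop == 0:
        return mask
    # Drop: magnitudes of active weights, in the same row-major order as `active`.
    mags = weight[mask.bool()].abs()
    drop_idx = active[mags.argsort()[:n_drop]]
    mask[drop_idx[:, 0], drop_idx[:, 1]] = 0
    # Grow: reactivate the same number of randomly chosen zeroed positions.
    inactive = (mask == 0).nonzero(as_tuple=False)
    grow_idx = inactive[torch.randperm(inactive.size(0))[:n_drop]]
    mask[grow_idx[:, 0], grow_idx[:, 1]] = 1
    weight.data.mul_(mask)
    return mask
```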
Our codes are available at https://github.com/Shiweiliuiiiiiii/Selfish-RNN.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21p/liu21p.pdf", "supp": "", "pdf_size": 901101, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14857851775115975297&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Department of Computer Science, Eindhoven University of Technology, the Netherlands+Faculty of Electrical Engineering, Mathematics, and Computer Science at University of Twente, the Netherlands; Department of Computer Science, Eindhoven University of Technology, the Netherlands; Department of Computer Science, Eindhoven University of Technology, the Netherlands; Department of Computer Science, Eindhoven University of Technology, the Netherlands", "aff_domain": "tue.nl; ; ; ", "email": "tue.nl; ; ; ", "github": "https://github.com/Shiweiliuiiiiiii/Selfish-RNN", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/liu21p.html", "aff_unique_index": "0+1;0;0;0", "aff_unique_norm": "Eindhoven University of Technology;University of Twente", "aff_unique_dep": "Department of Computer Science;Faculty of Electrical Engineering, Mathematics, and Computer Science", "aff_unique_url": "https://www.tue.nl;https://www.utwente.nl", "aff_unique_abbr": "TU/e;UT", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0;0", "aff_country_unique": "Netherlands" }, { "title": "Sequential Domain Adaptation by Synthesizing Distributionally Robust Experts", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10743", "id": "10743", "proceeding": "http://proceedings.mlr.press/v139/taskesen21a.html", "slides": "/media/icml-2021/Slides/10743.pdf", "author_site": "Bahar Taskesen, Man-Chung Yue, Jose Blanchet, Daniel Kuhn, Viet Anh Nguyen", "author": "Bahar Taskesen; Man-Chung Yue; Jose Blanchet; Daniel Kuhn; Viet Anh Nguyen", "abstract": "Least squares estimators, when trained on few target domain samples, may predict poorly. Supervised domain adaptation aims to improve the predictive accuracy by exploiting additional labeled training samples from a source distribution that is close to the target distribution. Given available data, we investigate novel strategies to synthesize a family of least squares estimator experts that are robust with regard to moment conditions. When these moment conditions are specified using Kullback-Leibler or Wasserstein-type divergences, we can find the robust estimators efficiently using convex optimization. We use the Bernstein online aggregation algorithm on the proposed family of robust experts to generate predictions for the sequential stream of target test samples. 
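The robust experts are combined online; as a simplified stand-in for Bernstein online aggregation (which adds a second-order correction not shown here), the sketch below uses plain exponentially weighted averaging of the experts' predictions under squared loss, with the learning rate as an assumed constant.

```python
import numpy as np

def aggregate_online(expert_preds, targets, eta=1.0):
    """expert_preds: [T, K] predictions of K experts over a stream of length T;
    targets: [T] labels revealed after each prediction. Returns the aggregated
    prediction made at every step (illustrative exponential-weights sketch)."""
    T, K = expert_preds.shape
    w = np.full(K, 1.0 / K)
    aggregated = np.empty(T)
    for t in range(T):
        aggregated[t] = w @ expert_preds[t]            # predict, then observe y_t
        losses = (expert_preds[t] - targets[t]) ** 2   # per-expert squared loss
        w = w * np.exp(-eta * losses)
        w /= w.sum()
    return aggregated
```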
Numerical experiments on real data show that the robust strategies systematically outperform non-robust interpolations of the empirical least squares estimators.", "bibtex": "@InProceedings{pmlr-v139-taskesen21a,\n title = \t {Sequential Domain Adaptation by Synthesizing Distributionally Robust Experts},\n author = {Taskesen, Bahar and Yue, Man-Chung and Blanchet, Jose and Kuhn, Daniel and Nguyen, Viet Anh},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10162--10172},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/taskesen21a/taskesen21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/taskesen21a.html},\n abstract = \t {Least squares estimators, when trained on few target domain samples, may predict poorly. Supervised domain adaptation aims to improve the predictive accuracy by exploiting additional labeled training samples from a source distribution that is close to the target distribution. Given available data, we investigate novel strategies to synthesize a family of least squares estimator experts that are robust with regard to moment conditions. When these moment conditions are specified using Kullback-Leibler or Wasserstein-type divergences, we can find the robust estimators efficiently using convex optimization. We use the Bernstein online aggregation algorithm on the proposed family of robust experts to generate predictions for the sequential stream of target test samples. Numerical experiments on real data show that the robust strategies systematically outperform non-robust interpolations of the empirical least squares estimators.}\n}", "pdf": "http://proceedings.mlr.press/v139/taskesen21a/taskesen21a.pdf", "supp": "", "pdf_size": 774359, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6930689921879255394&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Risk Analytics and Optimization Chair, \u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne; Department of Applied Mathematics, The Hong Kong Polytechnic University; Department of Management Science and Engineering, Stanford University + VinAI Research, Vietnam; Risk Analytics and Optimization Chair, \u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne; VinAI Research, Vietnam", "aff_domain": "epfl.ch; ; ; ; ", "email": "epfl.ch; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/taskesen21a.html", "aff_unique_index": "0;1;2+3;0;3", "aff_unique_norm": "EPFL;Hong Kong Polytechnic University;Stanford University;VinAI Research", "aff_unique_dep": "Risk Analytics and Optimization Chair;Department of Applied Mathematics;Department of Management Science and Engineering;", "aff_unique_url": "https://www.epfl.ch;https://www.polyu.edu.hk;https://www.stanford.edu;https://www.vin.ai", "aff_unique_abbr": "EPFL;PolyU;Stanford;VinAI", "aff_campus_unique_index": "1;2", "aff_campus_unique": ";Hong Kong SAR;Stanford", "aff_country_unique_index": "0;1;2+3;0;3", "aff_country_unique": "Switzerland;China;United States;Vietnam" }, { "title": "Sharf: Shape-conditioned Radiance Fields from a Single View", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8913", "id": "8913", "proceeding": "http://proceedings.mlr.press/v139/rematas21a.html", "slides": "", "author_site": 
"Konstantinos Rematas, Ricardo Martin-Brualla, Vittorio Ferrari", "author": "Konstantinos Rematas; Ricardo Martin-Brualla; Vittorio Ferrari", "abstract": "We present a method for estimating neural scenes representations of objects given only a single image. The core of our method is the estimation of a geometric scaffold for the object and its use as a guide for the reconstruction of the underlying radiance field. Our formulation is based on a generative process that first maps a latent code to a voxelized shape, and then renders it to an image, with the object appearance being controlled by a second latent code. During inference, we optimize both the latent codes and the networks to fit a test image of a new object. The explicit disentanglement of shape and appearance allows our model to be fine-tuned given a single image. We can then render new views in a geometrically consistent manner and they represent faithfully the input object. Additionally, our method is able to generalize to images outside of the training domain (more realistic renderings and even real photographs). Finally, the inferred geometric scaffold is itself an accurate estimate of the object\u2019s 3D shape. We demonstrate in several experiments the effectiveness of our approach in both synthetic and real images.", "bibtex": "@InProceedings{pmlr-v139-rematas21a,\n title = \t {Sharf: Shape-conditioned Radiance Fields from a Single View},\n author = {Rematas, Konstantinos and Martin-Brualla, Ricardo and Ferrari, Vittorio},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8948--8958},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/rematas21a/rematas21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/rematas21a.html},\n abstract = \t {We present a method for estimating neural scenes representations of objects given only a single image. The core of our method is the estimation of a geometric scaffold for the object and its use as a guide for the reconstruction of the underlying radiance field. Our formulation is based on a generative process that first maps a latent code to a voxelized shape, and then renders it to an image, with the object appearance being controlled by a second latent code. During inference, we optimize both the latent codes and the networks to fit a test image of a new object. The explicit disentanglement of shape and appearance allows our model to be fine-tuned given a single image. We can then render new views in a geometrically consistent manner and they represent faithfully the input object. Additionally, our method is able to generalize to images outside of the training domain (more realistic renderings and even real photographs). Finally, the inferred geometric scaffold is itself an accurate estimate of the object\u2019s 3D shape. 
We demonstrate in several experiments the effectiveness of our approach in both synthetic and real images.}\n}", "pdf": "http://proceedings.mlr.press/v139/rematas21a/rematas21a.pdf", "supp": "", "pdf_size": 9282145, "gs_citation": 124, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12802479619132632829&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Google Research; Google Research; Google Research", "aff_domain": "google.com; ; ", "email": "google.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/rematas21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google Research", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Sharing Less is More: Lifelong Learning in Deep Networks with Selective Layer Transfer", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10559", "id": "10559", "proceeding": "http://proceedings.mlr.press/v139/lee21a.html", "slides": "/media/icml-2021/Slides/10559.pdf", "author_site": "Seungwon Lee, Sima Behpour, Eric Eaton", "author": "Seungwon Lee; Sima Behpour; Eric Eaton", "abstract": "Effective lifelong learning across diverse tasks requires the transfer of diverse knowledge, yet transferring irrelevant knowledge may lead to interference and catastrophic forgetting. In deep networks, transferring the appropriate granularity of knowledge is as important as the transfer mechanism, and must be driven by the relationships among tasks. We first show that the lifelong learning performance of several current deep learning architectures can be significantly improved by transfer at the appropriate layers. We then develop an expectation-maximization (EM) method to automatically select the appropriate transfer configuration and optimize the task network weights. This EM-based selective transfer is highly effective, balancing transfer performance on all tasks with avoiding catastrophic forgetting, as demonstrated on three algorithms in several lifelong object classification scenarios.", "bibtex": "@InProceedings{pmlr-v139-lee21a,\n title = \t {Sharing Less is More: Lifelong Learning in Deep Networks with Selective Layer Transfer},\n author = {Lee, Seungwon and Behpour, Sima and Eaton, Eric},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6065--6075},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lee21a/lee21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lee21a.html},\n abstract = \t {Effective lifelong learning across diverse tasks requires the transfer of diverse knowledge, yet transferring irrelevant knowledge may lead to interference and catastrophic forgetting. In deep networks, transferring the appropriate granularity of knowledge is as important as the transfer mechanism, and must be driven by the relationships among tasks. We first show that the lifelong learning performance of several current deep learning architectures can be significantly improved by transfer at the appropriate layers. 
We then develop an expectation-maximization (EM) method to automatically select the appropriate transfer configuration and optimize the task network weights. This EM-based selective transfer is highly effective, balancing transfer performance on all tasks with avoiding catastrophic forgetting, as demonstrated on three algorithms in several lifelong object classification scenarios.}\n}", "pdf": "http://proceedings.mlr.press/v139/lee21a/lee21a.pdf", "supp": "", "pdf_size": 726120, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17333174523433788810&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Department of Computer and Information Science, University of Pennsylvania, Philadelphia, PA, USA; Computer Science Department, Carnegie Mellon University, Pittsburgh, PA, USA; Department of Computer and Information Science, University of Pennsylvania, Philadelphia, PA, USA", "aff_domain": "seas.upenn.edu; ; ", "email": "seas.upenn.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/lee21a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Pennsylvania;Carnegie Mellon University", "aff_unique_dep": "Department of Computer and Information Science;Computer Science Department", "aff_unique_url": "https://www.upenn.edu;https://www.cmu.edu", "aff_unique_abbr": "UPenn;CMU", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Philadelphia;Pittsburgh", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Sharper Generalization Bounds for Clustering", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8953", "id": "8953", "proceeding": "http://proceedings.mlr.press/v139/li21k.html", "slides": "", "author_site": "Shaojie Li, Yong Liu", "author": "Shaojie Li; Yong Liu", "abstract": "Existing generalization analysis of clustering mainly focuses on specific instantiations, such as (kernel) $k$-means, and a unified framework for studying clustering performance is still lacking. Besides, the existing excess clustering risk bounds are mostly of order $\\mathcal{O}(K/\\sqrt{n})$ provided that the underlying distribution has bounded support, where $n$ is the sample size and $K$ is the cluster numbers, or of order $\\mathcal{O}(K^2/n)$ under strong assumptions on the underlying distribution, where these assumptions are hard to be verified in general. In this paper, we propose a unified clustering learning framework and investigate its excess risk bounds, obtaining state-of-the-art upper bounds under mild assumptions. Specifically, we derive sharper bounds of order $\\mathcal{O}(K^2/n)$ under mild assumptions on the covering number of the hypothesis spaces, where these assumptions are easy to be verified. Moreover, for the hard clustering scheme, such as (kernel) $k$-means, if just assume the hypothesis functions to be bounded, we improve the upper bounds from the order $\\mathcal{O}(K/\\sqrt{n})$ to $\\mathcal{O}(\\sqrt{K}/\\sqrt{n})$. 
Furthermore, state-of-the-art bounds of faster order $\\mathcal{O}(K/n)$ are obtained with the covering number assumptions.", "bibtex": "@InProceedings{pmlr-v139-li21k,\n title = \t {Sharper Generalization Bounds for Clustering},\n author = {Li, Shaojie and Liu, Yong},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6392--6402},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21k/li21k.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21k.html},\n abstract = \t {Existing generalization analysis of clustering mainly focuses on specific instantiations, such as (kernel) $k$-means, and a unified framework for studying clustering performance is still lacking. Besides, the existing excess clustering risk bounds are mostly of order $\\mathcal{O}(K/\\sqrt{n})$ provided that the underlying distribution has bounded support, where $n$ is the sample size and $K$ is the cluster numbers, or of order $\\mathcal{O}(K^2/n)$ under strong assumptions on the underlying distribution, where these assumptions are hard to be verified in general. In this paper, we propose a unified clustering learning framework and investigate its excess risk bounds, obtaining state-of-the-art upper bounds under mild assumptions. Specifically, we derive sharper bounds of order $\\mathcal{O}(K^2/n)$ under mild assumptions on the covering number of the hypothesis spaces, where these assumptions are easy to be verified. Moreover, for the hard clustering scheme, such as (kernel) $k$-means, if just assume the hypothesis functions to be bounded, we improve the upper bounds from the order $\\mathcal{O}(K/\\sqrt{n})$ to $\\mathcal{O}(\\sqrt{K}/\\sqrt{n})$. 
Furthermore, state-of-the-art bounds of faster order $\\mathcal{O}(K/n)$ are obtained with the covering number assumptions.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21k/li21k.pdf", "supp": "", "pdf_size": 358838, "gs_citation": 36, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4051560861144460311&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Gaoling School of Arti\ufb01cial Intelligence, Renmin University of China, Beijing, China+Beijing Key Laboratory of Big Data Management and Analysis Methods, Beijing, China; Gaoling School of Arti\ufb01cial Intelligence, Renmin University of China, Beijing, China+Beijing Key Laboratory of Big Data Management and Analysis Methods, Beijing, China", "aff_domain": "ruc.edu.cn;ruc.edu.cn", "email": "ruc.edu.cn;ruc.edu.cn", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/li21k.html", "aff_unique_index": "0+1;0+1", "aff_unique_norm": "Renmin University of China;Beijing Key Laboratory of Big Data Management and Analysis Methods", "aff_unique_dep": "Gaoling School of Arti\ufb01cial Intelligence;Big Data Management and Analysis", "aff_unique_url": "http://www.ruc.edu.cn;", "aff_unique_abbr": "RUC;", "aff_campus_unique_index": "0+0;0+0", "aff_campus_unique": "Beijing", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "China" }, { "title": "Shortest-Path Constrained Reinforcement Learning for Sparse Reward Tasks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9117", "id": "9117", "proceeding": "http://proceedings.mlr.press/v139/sohn21a.html", "slides": "/media/icml-2021/Slides/9117.pdf", "author_site": "Sungryull Sohn, Sungtae Lee, Jongwook Choi, Harm van Seijen, Mehdi Fatemi, Honglak Lee", "author": "Sungryull Sohn; Sungtae Lee; Jongwook Choi; Harm H Van Seijen; Mehdi Fatemi; Honglak Lee", "abstract": "We propose the k-Shortest-Path (k-SP) constraint: a novel constraint on the agent\u2019s trajectory that improves the sample efficiency in sparse-reward MDPs. We show that any optimal policy necessarily satisfies the k-SP constraint. Notably, the k-SP constraint prevents the policy from exploring state-action pairs along the non-k-SP trajectories (e.g., going back and forth). However, in practice, excluding state-action pairs may hinder the convergence of RL algorithms. To overcome this, we propose a novel cost function that penalizes the policy violating SP constraint, instead of completely excluding it. Our numerical experiment in a tabular RL setting demonstrates that the SP-constraint can significantly reduce the trajectory space of policy. As a result, our constraint enables more sample efficient learning by suppressing redundant exploration and exploitation. 
Our experiments on MiniGrid, DeepMind Lab, Atari, and Fetch show that the proposed method significantly improves proximal policy optimization (PPO) and outperforms existing novelty-seeking exploration methods including count-based exploration even in continuous control tasks, indicating that it improves the sample efficiency by preventing the agent from taking redundant actions.", "bibtex": "@InProceedings{pmlr-v139-sohn21a,\n title = \t {Shortest-Path Constrained Reinforcement Learning for Sparse Reward Tasks},\n author = {Sohn, Sungryull and Lee, Sungtae and Choi, Jongwook and Van Seijen, Harm H and Fatemi, Mehdi and Lee, Honglak},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9780--9790},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sohn21a/sohn21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/sohn21a.html},\n abstract = \t {We propose the k-Shortest-Path (k-SP) constraint: a novel constraint on the agent\u2019s trajectory that improves the sample efficiency in sparse-reward MDPs. We show that any optimal policy necessarily satisfies the k-SP constraint. Notably, the k-SP constraint prevents the policy from exploring state-action pairs along the non-k-SP trajectories (e.g., going back and forth). However, in practice, excluding state-action pairs may hinder the convergence of RL algorithms. To overcome this, we propose a novel cost function that penalizes the policy violating SP constraint, instead of completely excluding it. Our numerical experiment in a tabular RL setting demonstrates that the SP-constraint can significantly reduce the trajectory space of policy. As a result, our constraint enables more sample efficient learning by suppressing redundant exploration and exploitation. 
Our experiments on MiniGrid, DeepMind Lab, Atari, and Fetch show that the proposed method significantly improves proximal policy optimization (PPO) and outperforms existing novelty-seeking exploration methods including count-based exploration even in continuous control tasks, indicating that it improves the sample efficiency by preventing the agent from taking redundant actions.}\n}", "pdf": "http://proceedings.mlr.press/v139/sohn21a/sohn21a.pdf", "supp": "", "pdf_size": 14459067, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5761539218622911437&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "University of Michigan+LG AI Research; Yonsei University; University of Michigan; Microsoft Research; Microsoft Research; University of Michigan+LG AI Research", "aff_domain": "umich.edu; ; ; ; ; ", "email": "umich.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/sohn21a.html", "aff_unique_index": "0+1;2;0;3;3;0+1", "aff_unique_norm": "University of Michigan;LG;Yonsei University;Microsoft", "aff_unique_dep": ";LG AI Research;;Microsoft Research", "aff_unique_url": "https://www.umich.edu;https://www.lgaires.com;https://www.yonsei.ac.kr;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "UM;LG AI;Yonsei;MSR", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+1;1;0;0;0;0+1", "aff_country_unique": "United States;South Korea" }, { "title": "SiameseXML: Siamese Networks meet Extreme Classifiers with 100M Labels", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8847", "id": "8847", "proceeding": "http://proceedings.mlr.press/v139/dahiya21a.html", "slides": "", "author_site": "Kunal Dahiya, Ananye Agarwal, Deepak Saini, Gururaj K, Jian Jiao, Amit Singh, Sumeet Agarwal, Purushottam Kar, Manik Varma", "author": "Kunal Dahiya; Ananye Agarwal; Deepak Saini; Gururaj K; Jian Jiao; Amit Singh; Sumeet Agarwal; Purushottam Kar; Manik Varma", "abstract": "Deep extreme multi-label learning (XML) requires training deep architectures that can tag a data point with its most relevant subset of labels from an extremely large label set. XML applications such as ad and product recommendation involve labels rarely seen during training but which nevertheless hold the key to recommendations that delight users. Effective utilization of label metadata and high quality predictions for rare labels at the scale of millions of labels are thus key challenges in contemporary XML research. To address these, this paper develops the SiameseXML framework based on a novel probabilistic model that naturally motivates a modular approach melding Siamese architectures with high-capacity extreme classifiers, and a training pipeline that effortlessly scales to tasks with 100 million labels. SiameseXML offers predictions 2\u201313% more accurate than leading XML methods on public benchmark datasets, as well as in live A/B tests on the Bing search engine, it offers significant gains in click-through-rates, coverage, revenue and other online metrics over state-of-the-art techniques currently in production. 
Code for SiameseXML is available at https://github.com/Extreme-classification/siamesexml", "bibtex": "@InProceedings{pmlr-v139-dahiya21a,\n title = \t {SiameseXML: Siamese Networks meet Extreme Classifiers with 100M Labels},\n author = {Dahiya, Kunal and Agarwal, Ananye and Saini, Deepak and K, Gururaj and Jiao, Jian and Singh, Amit and Agarwal, Sumeet and Kar, Purushottam and Varma, Manik},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2330--2340},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/dahiya21a/dahiya21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/dahiya21a.html},\n abstract = \t {Deep extreme multi-label learning (XML) requires training deep architectures that can tag a data point with its most relevant subset of labels from an extremely large label set. XML applications such as ad and product recommendation involve labels rarely seen during training but which nevertheless hold the key to recommendations that delight users. Effective utilization of label metadata and high quality predictions for rare labels at the scale of millions of labels are thus key challenges in contemporary XML research. To address these, this paper develops the SiameseXML framework based on a novel probabilistic model that naturally motivates a modular approach melding Siamese architectures with high-capacity extreme classifiers, and a training pipeline that effortlessly scales to tasks with 100 million labels. SiameseXML offers predictions 2\u201313% more accurate than leading XML methods on public benchmark datasets, as well as in live A/B tests on the Bing search engine, it offers significant gains in click-through-rates, coverage, revenue and other online metrics over state-of-the-art techniques currently in production. 
Code for SiameseXML is available at https://github.com/Extreme-classification/siamesexml}\n}", "pdf": "http://proceedings.mlr.press/v139/dahiya21a/dahiya21a.pdf", "supp": "", "pdf_size": 1224238, "gs_citation": 51, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6123025362296235844&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Indian Institute of Technology Delhi; Indian Institute of Technology Delhi; Microsoft; Microsoft; Microsoft; Microsoft; Indian Institute of Technology Delhi; Indian Institute of Technology Kanpur + Microsoft; Microsoft + Indian Institute of Technology Delhi", "aff_domain": "gmail.com; ; ; ; ; ; ; ;", "email": "gmail.com; ; ; ; ; ; ; ;", "github": "https://github.com/Extreme-classification/siamesexml", "project": "", "author_num": 9, "oa": "https://proceedings.mlr.press/v139/dahiya21a.html", "aff_unique_index": "0;0;1;1;1;1;0;2+1;1+0", "aff_unique_norm": "Indian Institute of Technology Delhi;Microsoft;Indian Institute of Technology Kanpur", "aff_unique_dep": ";Microsoft Corporation;", "aff_unique_url": "https://www.iitd.ac.in;https://www.microsoft.com;https://www.iitk.ac.in", "aff_unique_abbr": "IIT Delhi;Microsoft;IIT Kanpur", "aff_campus_unique_index": "0;0;0;2;0", "aff_campus_unique": "Delhi;;Kanpur", "aff_country_unique_index": "0;0;1;1;1;1;0;0+1;1+0", "aff_country_unique": "India;United States" }, { "title": "SigGPDE: Scaling Sparse Gaussian Processes on Sequential Data", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10563", "id": "10563", "proceeding": "http://proceedings.mlr.press/v139/lemercier21a.html", "slides": "", "author_site": "Maud Lemercier, Cristopher Salvi, Thomas Cass, Edwin V Bonilla, Theodoros Damoulas, Terry Lyons", "author": "Maud Lemercier; Cristopher Salvi; Thomas Cass; Edwin V. Bonilla; Theodoros Damoulas; Terry J Lyons", "abstract": "Making predictions and quantifying their uncertainty when the input data is sequential is a fundamental learning challenge, recently attracting increasing attention. We develop SigGPDE, a new scalable sparse variational inference framework for Gaussian Processes (GPs) on sequential data. Our contribution is twofold. First, we construct inducing variables underpinning the sparse approximation so that the resulting evidence lower bound (ELBO) does not require any matrix inversion. Second, we show that the gradients of the GP signature kernel are solutions of a hyperbolic partial differential equation (PDE). This theoretical insight allows us to build an efficient back-propagation algorithm to optimize the ELBO. We showcase the significant computational gains of SigGPDE compared to existing methods, while achieving state-of-the-art performance for classification tasks on large datasets of up to 1 million multivariate time series.", "bibtex": "@InProceedings{pmlr-v139-lemercier21a,\n title = \t {SigGPDE: Scaling Sparse Gaussian Processes on Sequential Data},\n author = {Lemercier, Maud and Salvi, Cristopher and Cass, Thomas and Bonilla, Edwin V. 
and Damoulas, Theodoros and Lyons, Terry J},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6233--6242},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lemercier21a/lemercier21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lemercier21a.html},\n abstract = \t {Making predictions and quantifying their uncertainty when the input data is sequential is a fundamental learning challenge, recently attracting increasing attention. We develop SigGPDE, a new scalable sparse variational inference framework for Gaussian Processes (GPs) on sequential data. Our contribution is twofold. First, we construct inducing variables underpinning the sparse approximation so that the resulting evidence lower bound (ELBO) does not require any matrix inversion. Second, we show that the gradients of the GP signature kernel are solutions of a hyperbolic partial differential equation (PDE). This theoretical insight allows us to build an efficient back-propagation algorithm to optimize the ELBO. We showcase the significant computational gains of SigGPDE compared to existing methods, while achieving state-of-the-art performance for classification tasks on large datasets of up to 1 million multivariate time series.}\n}", "pdf": "http://proceedings.mlr.press/v139/lemercier21a/lemercier21a.pdf", "supp": "", "pdf_size": 584112, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=897695870939479420&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "University of Warwick and Alan Turing Institute; University of Oxford and Alan Turing Institute; Imperial College London and Alan Turing Institute; CSIRO\u2019s Data61 and The University of Sydney; University of Warwick and Alan Turing Institute; University of Oxford and Alan Turing Institute", "aff_domain": "warwick.ac.uk; ; ; ; ; ", "email": "warwick.ac.uk; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/lemercier21a.html", "aff_unique_index": "0;1;2;3;0;1", "aff_unique_norm": "University of Warwick;University of Oxford;Imperial College London;CSIRO\u2019s Data61", "aff_unique_dep": ";;;", "aff_unique_url": "https://warwick.ac.uk;https://www.ox.ac.uk;https://www.imperial.ac.uk;https://www.csiro.au/en/Research/Data61", "aff_unique_abbr": "Warwick;Oxford;Imperial;Data61", "aff_campus_unique_index": "1;2;1", "aff_campus_unique": ";Oxford;London", "aff_country_unique_index": "0;0;0;1;0;0", "aff_country_unique": "United Kingdom;Australia" }, { "title": "Signatured Deep Fictitious Play for Mean Field Games with Common Noise", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9157", "id": "9157", "proceeding": "http://proceedings.mlr.press/v139/min21a.html", "slides": "", "author_site": "Ming Min, Ruimeng Hu", "author": "Ming Min; Ruimeng Hu", "abstract": "Existing deep learning methods for solving mean-field games (MFGs) with common noise fix the sampling common noise paths and then solve the corresponding MFGs. This leads to a nested loop structure with millions of simulations of common noise paths in order to produce accurate solutions, which results in prohibitive computational cost and limits the applications to a large extent. 
In this paper, based on the rough path theory, we propose a novel single-loop algorithm, named signatured deep fictitious play (Sig-DFP), by which we can work with the unfixed common noise setup to avoid the nested loop structure and reduce the computational complexity significantly. The proposed algorithm can accurately capture the effect of common uncertainty changes on mean-field equilibria without further training of neural networks, as previously needed in the existing machine learning algorithms. The efficiency is supported by three applications, including linear-quadratic MFGs, mean-field portfolio game, and mean-field game of optimal consumption and investment. Overall, we provide a new point of view from the rough path theory to solve MFGs with common noise with significantly improved efficiency and an extensive range of applications. In addition, we report the first deep learning work to deal with extended MFGs (a mean-field interaction via both the states and controls) with common noise.", "bibtex": "@InProceedings{pmlr-v139-min21a,\n title = \t {Signatured Deep Fictitious Play for Mean Field Games with Common Noise},\n author = {Min, Ming and Hu, Ruimeng},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7736--7747},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/min21a/min21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/min21a.html},\n abstract = \t {Existing deep learning methods for solving mean-field games (MFGs) with common noise fix the sampling common noise paths and then solve the corresponding MFGs. This leads to a nested loop structure with millions of simulations of common noise paths in order to produce accurate solutions, which results in prohibitive computational cost and limits the applications to a large extent. In this paper, based on the rough path theory, we propose a novel single-loop algorithm, named signatured deep fictitious play (Sig-DFP), by which we can work with the unfixed common noise setup to avoid the nested loop structure and reduce the computational complexity significantly. The proposed algorithm can accurately capture the effect of common uncertainty changes on mean-field equilibria without further training of neural networks, as previously needed in the existing machine learning algorithms. The efficiency is supported by three applications, including linear-quadratic MFGs, mean-field portfolio game, and mean-field game of optimal consumption and investment. Overall, we provide a new point of view from the rough path theory to solve MFGs with common noise with significantly improved efficiency and an extensive range of applications. 
In addition, we report the first deep learning work to deal with extended MFGs (a mean-field interaction via both the states and controls) with common noise.}\n}", "pdf": "http://proceedings.mlr.press/v139/min21a/min21a.pdf", "supp": "", "pdf_size": 7029231, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5737626410689821885&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Statistics and Applied Probability, University of California, Santa Barbara, CA 93106-3110, USA+Department of Mathematics, University of California, Santa Barbara, CA 93106-3080, USA; Department of Statistics and Applied Probability, University of California, Santa Barbara, CA 93106-3110, USA+Department of Mathematics, University of California, Santa Barbara, CA 93106-3080, USA", "aff_domain": "ucsb.edu;ucsb.edu", "email": "ucsb.edu;ucsb.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/min21a.html", "aff_unique_index": "0+0;0+0", "aff_unique_norm": "University of California, Santa Barbara", "aff_unique_dep": "Department of Statistics and Applied Probability", "aff_unique_url": "https://www.ucsb.edu", "aff_unique_abbr": "UCSB", "aff_campus_unique_index": "0+0;0+0", "aff_campus_unique": "Santa Barbara", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "United States" }, { "title": "SimAM: A Simple, Parameter-Free Attention Module for Convolutional Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8921", "id": "8921", "proceeding": "http://proceedings.mlr.press/v139/yang21o.html", "slides": "", "author_site": "Lingxiao YANG, Ru-Yuan Zhang, Lida LI, Xiaohua Xie", "author": "Lingxiao Yang; Ru-Yuan Zhang; Lida Li; Xiaohua Xie", "abstract": "In this paper, we propose a conceptually simple but very effective attention module for Convolutional Neural Networks (ConvNets). In contrast to existing channel-wise and spatial-wise attention modules, our module instead infers 3-D attention weights for the feature map in a layer without adding parameters to the original networks. Specifically, we base on some well-known neuroscience theories and propose to optimize an energy function to find the importance of each neuron. We further derive a fast closed-form solution for the energy function, and show that the solution can be implemented in less than ten lines of code. Another advantage of the module is that most of the operators are selected based on the solution to the defined energy function, avoiding too many efforts for structure tuning. Quantitative evaluations on various visual tasks demonstrate that the proposed module is flexible and effective to improve the representation ability of many ConvNets. 
Our code is available at Pytorch-SimAM.", "bibtex": "@InProceedings{pmlr-v139-yang21o,\n title = \t {SimAM: A Simple, Parameter-Free Attention Module for Convolutional Neural Networks},\n author = {Yang, Lingxiao and Zhang, Ru-Yuan and Li, Lida and Xie, Xiaohua},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11863--11874},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yang21o/yang21o.pdf},\n url = \t {https://proceedings.mlr.press/v139/yang21o.html},\n abstract = \t {In this paper, we propose a conceptually simple but very effective attention module for Convolutional Neural Networks (ConvNets). In contrast to existing channel-wise and spatial-wise attention modules, our module instead infers 3-D attention weights for the feature map in a layer without adding parameters to the original networks. Specifically, we base on some well-known neuroscience theories and propose to optimize an energy function to find the importance of each neuron. We further derive a fast closed-form solution for the energy function, and show that the solution can be implemented in less than ten lines of code. Another advantage of the module is that most of the operators are selected based on the solution to the defined energy function, avoiding too many efforts for structure tuning. Quantitative evaluations on various visual tasks demonstrate that the proposed module is flexible and effective to improve the representation ability of many ConvNets. Our code is available at Pytorch-SimAM.}\n}", "pdf": "http://proceedings.mlr.press/v139/yang21o/yang21o.pdf", "supp": "", "pdf_size": 1577871, "gs_citation": 1685, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6748424654077587327&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "School of Computer Science and Engineering, Sun Yat-sen University, Guangzhou, China+Guangdong Province Key Laboratory of Information Security Technology, Sun Yat-sen University, Guangzhou, China+Key Laboratory of Machine Intelligence and Advanced Computing, Ministry of Education, Sun Yat-sen University, Guangzhou, China; Institute of Psychology and Behavioral Science, Shanghai Jiao Tong University, Shanghai, China+Shanghai Key Laboratory of Psychotic Disorders, Shanghai Mental Health Center, Shanghai Jiao Tong University, Shanghai, China; The Hong Kong Polytechnic University, Hong Kong, China; School of Computer Science and Engineering, Sun Yat-sen University, Guangzhou, China+Guangdong Province Key Laboratory of Information Security Technology, Sun Yat-sen University, Guangzhou, China+Key Laboratory of Machine Intelligence and Advanced Computing, Ministry of Education, Sun Yat-sen University, Guangzhou, China", "aff_domain": "example.com;example.com;example.com;mail.sysu.edu.cn", "email": "example.com;example.com;example.com;mail.sysu.edu.cn", "github": "", "project": "Pytorch-SimAM", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/yang21o.html", "aff_unique_index": "0+0+0;1+1;2;0+0+0", "aff_unique_norm": "Sun Yat-sen University;Shanghai Jiao Tong University;Hong Kong Polytechnic University", "aff_unique_dep": "School of Computer Science and Engineering;Institute of Psychology and Behavioral Science;", "aff_unique_url": "http://www.sysu.edu.cn;https://www.sjtu.edu.cn;https://www.polyu.edu.hk", 
"aff_unique_abbr": "SYSU;SJTU;PolyU", "aff_campus_unique_index": "0+0+0;1+1;2;0+0+0", "aff_campus_unique": "Guangzhou;Shanghai;Hong Kong", "aff_country_unique_index": "0+0+0;0+0;0;0+0+0", "aff_country_unique": "China" }, { "title": "Simple and Effective VAE Training with Calibrated Decoders", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9721", "id": "9721", "proceeding": "http://proceedings.mlr.press/v139/rybkin21a.html", "slides": "", "author_site": "Oleh Rybkin, Kostas Daniilidis, Sergey Levine", "author": "Oleh Rybkin; Kostas Daniilidis; Sergey Levine", "abstract": "Variational autoencoders (VAEs) provide an effective and simple method for modeling complex distributions. However, training VAEs often requires considerable hyperparameter tuning to determine the optimal amount of information retained by the latent variable. We study the impact of calibrated decoders, which learn the uncertainty of the decoding distribution and can determine this amount of information automatically, on the VAE performance. While many methods for learning calibrated decoders have been proposed, many of the recent papers that employ VAEs rely on heuristic hyperparameters and ad-hoc modifications instead. We perform the first comprehensive comparative analysis of calibrated decoder and provide recommendations for simple and effective VAE training. Our analysis covers a range of datasets and several single-image and sequential VAE models. We further propose a simple but novel modification to the commonly used Gaussian decoder, which computes the prediction variance analytically. We observe empirically that using heuristic modifications is not necessary with our method.", "bibtex": "@InProceedings{pmlr-v139-rybkin21a,\n title = \t {Simple and Effective VAE Training with Calibrated Decoders},\n author = {Rybkin, Oleh and Daniilidis, Kostas and Levine, Sergey},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9179--9189},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/rybkin21a/rybkin21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/rybkin21a.html},\n abstract = \t {Variational autoencoders (VAEs) provide an effective and simple method for modeling complex distributions. However, training VAEs often requires considerable hyperparameter tuning to determine the optimal amount of information retained by the latent variable. We study the impact of calibrated decoders, which learn the uncertainty of the decoding distribution and can determine this amount of information automatically, on the VAE performance. While many methods for learning calibrated decoders have been proposed, many of the recent papers that employ VAEs rely on heuristic hyperparameters and ad-hoc modifications instead. We perform the first comprehensive comparative analysis of calibrated decoder and provide recommendations for simple and effective VAE training. Our analysis covers a range of datasets and several single-image and sequential VAE models. We further propose a simple but novel modification to the commonly used Gaussian decoder, which computes the prediction variance analytically. 
We observe empirically that using heuristic modifications is not necessary with our method.}\n}", "pdf": "http://proceedings.mlr.press/v139/rybkin21a/rybkin21a.pdf", "supp": "", "pdf_size": 1160886, "gs_citation": 120, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16943299314546110740&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "University of Pennsylvania; University of Pennsylvania; UC Berkeley", "aff_domain": "seas.upenn.edu; ; ", "email": "seas.upenn.edu; ; ", "github": "", "project": "https://orybkin.github.io/sigma-vae/", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/rybkin21a.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of Pennsylvania;University of California, Berkeley", "aff_unique_dep": ";", "aff_unique_url": "https://www.upenn.edu;https://www.berkeley.edu", "aff_unique_abbr": "UPenn;UC Berkeley", "aff_campus_unique_index": "1", "aff_campus_unique": ";Berkeley", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Simultaneous Similarity-based Self-Distillation for Deep Metric Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9287", "id": "9287", "proceeding": "http://proceedings.mlr.press/v139/roth21a.html", "slides": "", "author_site": "Karsten Roth, Timo Milbich, Bjorn Ommer, Joseph Paul Cohen, Marzyeh Ghassemi", "author": "Karsten Roth; Timo Milbich; Bjorn Ommer; Joseph Paul Cohen; Marzyeh Ghassemi", "abstract": "Deep Metric Learning (DML) provides a crucial tool for visual similarity and zero-shot retrieval applications by learning generalizing embedding spaces, although recent work in DML has shown strong performance saturation across training objectives. However, generalization capacity is known to scale with the embedding space dimensionality. Unfortunately, high dimensional embeddings also create higher retrieval cost for downstream applications. To remedy this, we propose S2SD - Simultaneous Similarity-based Self-distillation. S2SD extends DML with knowledge distillation from auxiliary, high-dimensional embedding and feature spaces to leverage complementary context during training while retaining test-time cost and with negligible changes to the training time. Experiments and ablations across different objectives and standard benchmarks show S2SD offering highly significant improvements of up to 7% in Recall@1, while also setting a new state-of-the-art.", "bibtex": "@InProceedings{pmlr-v139-roth21a,\n title = \t {Simultaneous Similarity-based Self-Distillation for Deep Metric Learning},\n author = {Roth, Karsten and Milbich, Timo and Ommer, Bjorn and Cohen, Joseph Paul and Ghassemi, Marzyeh},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9095--9106},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/roth21a/roth21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/roth21a.html},\n abstract = \t {Deep Metric Learning (DML) provides a crucial tool for visual similarity and zero-shot retrieval applications by learning generalizing embedding spaces, although recent work in DML has shown strong performance saturation across training objectives. However, generalization capacity is known to scale with the embedding space dimensionality. 
Unfortunately, high dimensional embeddings also create higher retrieval cost for downstream applications. To remedy this, we propose S2SD - Simultaneous Similarity-based Self-distillation. S2SD extends DML with knowledge distillation from auxiliary, high-dimensional embedding and feature spaces to leverage complementary context during training while retaining test-time cost and with negligible changes to the training time. Experiments and ablations across different objectives and standard benchmarks show S2SD offering highly significant improvements of up to 7% in Recall@1, while also setting a new state-of-the-art.}\n}", "pdf": "http://proceedings.mlr.press/v139/roth21a/roth21a.pdf", "supp": "", "pdf_size": 2720349, "gs_citation": 54, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15338344195329866309&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 2, "aff": "University of Toronto, Vector Institute+Heidelberg University, IWR; Heidelberg University, IWR; Heidelberg University, IWR; Mila, Universit\u00e9 de Montr\u00e9al; MIT", "aff_domain": "gmail.com; ; ; ; ", "email": "gmail.com; ; ; ; ", "github": "https://github.com/MLforHealth/S2SD", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/roth21a.html", "aff_unique_index": "0+1;1;1;2;3", "aff_unique_norm": "University of Toronto;Heidelberg University;Universit\u00e9 de Montr\u00e9al;Massachusetts Institute of Technology", "aff_unique_dep": ";Interdisciplinary Center for Scientific Computing (IWR);Mila;", "aff_unique_url": "https://www.utoronto.ca;https://www.uni-heidelberg.de;https://umontreal.ca;https://web.mit.edu", "aff_unique_abbr": "U of T;Uni HD;UdeM;MIT", "aff_campus_unique_index": "0;2", "aff_campus_unique": "Toronto;;Montr\u00e9al", "aff_country_unique_index": "0+1;1;1;0;2", "aff_country_unique": "Canada;Germany;United States" }, { "title": "SinIR: Efficient General Image Manipulation with Single Image Reconstruction", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10223", "id": "10223", "proceeding": "http://proceedings.mlr.press/v139/yoo21a.html", "slides": "", "author_site": "Jihyeong Yoo, Qifeng Chen", "author": "Jihyeong Yoo; Qifeng Chen", "abstract": "We propose SinIR, an efficient reconstruction-based framework trained on a single natural image for general image manipulation, including super-resolution, editing, harmonization, paint-to-image, photo-realistic style transfer, and artistic style transfer. We train our model on a single image with cascaded multi-scale learning, where each network at each scale is responsible for image reconstruction. This reconstruction objective greatly reduces the complexity and running time of training, compared to the GAN objective. However, the reconstruction objective also exacerbates the output quality. Therefore, to solve this problem, we further utilize simple random pixel shuffling, which also gives control over manipulation, inspired by the Denoising Autoencoder. With quantitative evaluation, we show that SinIR has competitive performance on various image manipulation tasks. Moreover, with a much simpler training objective (i.e., reconstruction), SinIR is trained 33.5 times faster than SinGAN (for 500x500 images) that solves similar tasks. 
Our code is publicly available at github.com/YooJiHyeong/SinIR.", "bibtex": "@InProceedings{pmlr-v139-yoo21a,\n title = \t {SinIR: Efficient General Image Manipulation with Single Image Reconstruction},\n author = {Yoo, Jihyeong and Chen, Qifeng},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12040--12050},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yoo21a/yoo21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yoo21a.html},\n abstract = \t {We propose SinIR, an efficient reconstruction-based framework trained on a single natural image for general image manipulation, including super-resolution, editing, harmonization, paint-to-image, photo-realistic style transfer, and artistic style transfer. We train our model on a single image with cascaded multi-scale learning, where each network at each scale is responsible for image reconstruction. This reconstruction objective greatly reduces the complexity and running time of training, compared to the GAN objective. However, the reconstruction objective also exacerbates the output quality. Therefore, to solve this problem, we further utilize simple random pixel shuffling, which also gives control over manipulation, inspired by the Denoising Autoencoder. With quantitative evaluation, we show that SinIR has competitive performance on various image manipulation tasks. Moreover, with a much simpler training objective (i.e., reconstruction), SinIR is trained 33.5 times faster than SinGAN (for 500x500 images) that solves similar tasks. Our code is publicly available at github.com/YooJiHyeong/SinIR.}\n}", "pdf": "http://proceedings.mlr.press/v139/yoo21a/yoo21a.pdf", "supp": "", "pdf_size": 3928247, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10599627975062939893&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science and Engineering, The Hong Kong University of Science and Technology, Clear Water Bay, Hong Kong; Department of Computer Science and Engineering, The Hong Kong University of Science and Technology, Clear Water Bay, Hong Kong", "aff_domain": "ust.hk;ust.hk", "email": "ust.hk;ust.hk", "github": "github.com/YooJiHyeong/SinIR", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/yoo21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Hong Kong University of Science and Technology", "aff_unique_dep": "Department of Computer Science and Engineering", "aff_unique_url": "https://www.ust.hk", "aff_unique_abbr": "HKUST", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Hong Kong SAR", "aff_country_unique_index": "0;0", "aff_country_unique": "China" }, { "title": "Single Pass Entrywise-Transformed Low Rank Approximation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8761", "id": "8761", "proceeding": "http://proceedings.mlr.press/v139/jiang21f.html", "slides": "", "author_site": "Yifei Jiang, Yi Li, Yiming Sun, Jiaxin Wang, David Woodruff", "author": "Yifei Jiang; Yi Li; Yiming Sun; Jiaxin Wang; David Woodruff", "abstract": "In applications such as natural language processing or computer vision, one is given a large $n \\times n$ matrix $A = (a_{i,j})$ and would like to compute a matrix decomposition, e.g., a low rank approximation, of a function $f(A) = 
(f(a_{i,j}))$ applied entrywise to $A$. A very important special case is the likelihood function $f\\left( A \\right ) = \\log{\\left( \\left| a_{ij}\\right| +1\\right)}$. A natural way to do this would be to simply apply $f$ to each entry of $A$, and then compute the matrix decomposition, but this requires storing all of $A$ as well as multiple passes over its entries. Recent work of Liang et al. shows how to find a rank-$k$ factorization to $f(A)$ using only $n \\cdot \\poly(\\eps^{-1}k\\log n)$ words of memory, with overall error $10\\|f(A)-[f(A)]_k\\|_F^2 + \\poly(\\epsilon/k) \\|f(A)\\|_{1,2}^2$, where $[f(A)]_k$ is the best rank-$k$ approximation to $f(A)$ and $\\|f(A)\\|_{1,2}^2$ is the square of the sum of Euclidean lengths of rows of $f(A)$. Their algorithm uses $3$ passes over the entries of $A$. The authors pose the open question of obtaining an algorithm with $n \\cdot \\poly(\\eps^{-1}k\\log n)$ words of memory using only a single pass over the entries of $A$. In this paper we resolve this open question, obtaining the first single-pass algorithm for this problem and for the same class of functions $f$ studied by Liang et al. Moreover, our error is $\\|f(A)-[f(A)]_k\\|_F^2 + \\poly(\\epsilon/k) \\|f(A)\\|_F^2$, where $\\|f(A)\\|_F^2$ is the sum of squares of Euclidean lengths of rows of $f(A)$. Thus our error is significantly smaller, as it removes the factor of $10$ and also $\\|f(A)\\|_F^2 \\leq \\|f(A)\\|_{1,2}^2$.", "bibtex": "@InProceedings{pmlr-v139-jiang21f,\n title = \t {Single Pass Entrywise-Transformed Low Rank Approximation},\n author = {Jiang, Yifei and Li, Yi and Sun, Yiming and Wang, Jiaxin and Woodruff, David},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4982--4991},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jiang21f/jiang21f.pdf},\n url = \t {https://proceedings.mlr.press/v139/jiang21f.html},\n abstract = \t {In applications such as natural language processing or computer vision, one is given a large $n \\times n$ matrix $A = (a_{i,j})$ and would like to compute a matrix decomposition, e.g., a low rank approximation, of a function $f(A) = (f(a_{i,j}))$ applied entrywise to $A$. A very important special case is the likelihood function $f\\left( A \\right ) = \\log{\\left( \\left| a_{ij}\\right| +1\\right)}$. A natural way to do this would be to simply apply $f$ to each entry of $A$, and then compute the matrix decomposition, but this requires storing all of $A$ as well as multiple passes over its entries. Recent work of Liang et al. shows how to find a rank-$k$ factorization to $f(A)$ using only $n \\cdot \\poly(\\eps^{-1}k\\log n)$ words of memory, with overall error $10\\|f(A)-[f(A)]_k\\|_F^2 + \\poly(\\epsilon/k) \\|f(A)\\|_{1,2}^2$, where $[f(A)]_k$ is the best rank-$k$ approximation to $f(A)$ and $\\|f(A)\\|_{1,2}^2$ is the square of the sum of Euclidean lengths of rows of $f(A)$. Their algorithm uses $3$ passes over the entries of $A$. The authors pose the open question of obtaining an algorithm with $n \\cdot \\poly(\\eps^{-1}k\\log n)$ words of memory using only a single pass over the entries of $A$. In this paper we resolve this open question, obtaining the first single-pass algorithm for this problem and for the same class of functions $f$ studied by Liang et al. 
Moreover, our error is $\\|f(A)-[f(A)]_k\\|_F^2 + \\poly(\\epsilon/k) \\|f(A)\\|_F^2$, where $\\|f(A)\\|_F^2$ is the sum of squares of Euclidean lengths of rows of $f(A)$. Thus our error is significantly smaller, as it removes the factor of $10$ and also $\\|f(A)\\|_F^2 \\leq \\|f(A)\\|_{1,2}^2$.}\n}", "pdf": "http://proceedings.mlr.press/v139/jiang21f/jiang21f.pdf", "supp": "", "pdf_size": 332115, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17113280724891155524&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Tianjin University, China; School of Physical and Mathematical Sciences, Nanyang Technological University, Singapore; School of Physical and Mathematical Sciences, Nanyang Technological University, Singapore; Wuhan University of Technology, China; Department of Computer Science, Carnegie Mellon University, USA", "aff_domain": "ntu.edu.sg;andrew.cmu.edu; ; ; ", "email": "ntu.edu.sg;andrew.cmu.edu; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/jiang21f.html", "aff_unique_index": "0;1;1;2;3", "aff_unique_norm": "Tianjin University;Nanyang Technological University;Wuhan University of Technology;Carnegie Mellon University", "aff_unique_dep": ";School of Physical and Mathematical Sciences;;Department of Computer Science", "aff_unique_url": "http://www.tju.edu.cn;https://www.ntu.edu.sg;http://www.wut.edu.cn;https://www.cmu.edu", "aff_unique_abbr": "Tianjin U;NTU;WUT;CMU", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Singapore", "aff_country_unique_index": "0;1;1;0;2", "aff_country_unique": "China;Singapore;United States" }, { "title": "Sinkhorn Label Allocation: Semi-Supervised Classification via Annealed Self-Training", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9221", "id": "9221", "proceeding": "http://proceedings.mlr.press/v139/tai21a.html", "slides": "", "author_site": "Kai Sheng Tai, Peter Bailis, Gregory Valiant", "author": "Kai Sheng Tai; Peter D Bailis; Gregory Valiant", "abstract": "Self-training is a standard approach to semi-supervised learning where the learner\u2019s own predictions on unlabeled data are used as supervision during training. In this paper, we reinterpret this label assignment process as an optimal transportation problem between examples and classes, wherein the cost of assigning an example to a class is mediated by the current predictions of the classifier. This formulation facilitates a practical annealing strategy for label assignment and allows for the inclusion of prior knowledge on class proportions via flexible upper bound constraints. The solutions to these assignment problems can be efficiently approximated using Sinkhorn iteration, thus enabling their use in the inner loop of standard stochastic optimization algorithms. 
We demonstrate the effectiveness of our algorithm on the CIFAR-10, CIFAR-100, and SVHN datasets in comparison with FixMatch, a state-of-the-art self-training algorithm.", "bibtex": "@InProceedings{pmlr-v139-tai21a,\n title = \t {Sinkhorn Label Allocation: Semi-Supervised Classification via Annealed Self-Training},\n author = {Tai, Kai Sheng and Bailis, Peter D and Valiant, Gregory},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10065--10075},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tai21a/tai21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/tai21a.html},\n abstract = \t {Self-training is a standard approach to semi-supervised learning where the learner\u2019s own predictions on unlabeled data are used as supervision during training. In this paper, we reinterpret this label assignment process as an optimal transportation problem between examples and classes, wherein the cost of assigning an example to a class is mediated by the current predictions of the classifier. This formulation facilitates a practical annealing strategy for label assignment and allows for the inclusion of prior knowledge on class proportions via flexible upper bound constraints. The solutions to these assignment problems can be efficiently approximated using Sinkhorn iteration, thus enabling their use in the inner loop of standard stochastic optimization algorithms. We demonstrate the effectiveness of our algorithm on the CIFAR-10, CIFAR-100, and SVHN datasets in comparison with FixMatch, a state-of-the-art self-training algorithm.}\n}", "pdf": "http://proceedings.mlr.press/v139/tai21a/tai21a.pdf", "supp": "", "pdf_size": 791853, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13645843302447766832&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Stanford University; Stanford University; Stanford University", "aff_domain": "cs.stanford.edu; ; ", "email": "cs.stanford.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/tai21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Size-Invariant Graph Representations for Graph Classification Extrapolations", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9239", "id": "9239", "proceeding": "http://proceedings.mlr.press/v139/bevilacqua21a.html", "slides": "/media/icml-2021/Slides/9239.pdf", "author_site": "Beatrice Bevilacqua, Yangze Zhou, Bruno Ribeiro", "author": "Beatrice Bevilacqua; Yangze Zhou; Bruno Ribeiro", "abstract": "In general, graph representation learning methods assume that the train and test data come from the same distribution. In this work we consider an underexplored area of an otherwise rapidly developing field of graph representation learning: The task of out-of-distribution (OOD) graph classification, where train and test data have different distributions, with test data unavailable during training. 
Our work shows it is possible to use a causal model to learn approximately invariant representations that better extrapolate between train and test data. Finally, we conclude with synthetic and real-world dataset experiments showcasing the benefits of representations that are invariant to train/test distribution shifts.", "bibtex": "@InProceedings{pmlr-v139-bevilacqua21a,\n title = \t {Size-Invariant Graph Representations for Graph Classification Extrapolations},\n author = {Bevilacqua, Beatrice and Zhou, Yangze and Ribeiro, Bruno},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {837--851},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bevilacqua21a/bevilacqua21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bevilacqua21a.html},\n abstract = \t {In general, graph representation learning methods assume that the train and test data come from the same distribution. In this work we consider an underexplored area of an otherwise rapidly developing field of graph representation learning: The task of out-of-distribution (OOD) graph classification, where train and test data have different distributions, with test data unavailable during training. Our work shows it is possible to use a causal model to learn approximately invariant representations that better extrapolate between train and test data. Finally, we conclude with synthetic and real-world dataset experiments showcasing the benefits of representations that are invariant to train/test distribution shifts.}\n}", "pdf": "http://proceedings.mlr.press/v139/bevilacqua21a/bevilacqua21a.pdf", "supp": "", "pdf_size": 523108, "gs_citation": 132, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18387285677592946358&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, Purdue University; Department of Statistics, Purdue University; Department of Computer Science, Purdue University", "aff_domain": "purdue.edu; ;purdue.edu", "email": "purdue.edu; ;purdue.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/bevilacqua21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Purdue University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.purdue.edu", "aff_unique_abbr": "Purdue", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "SketchEmbedNet: Learning Novel Concepts by Imitating Drawings", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9851", "id": "9851", "proceeding": "http://proceedings.mlr.press/v139/wang21s.html", "slides": "", "author_site": "Alexander Wang, Mengye Ren, Richard Zemel", "author": "Alexander Wang; Mengye Ren; Richard Zemel", "abstract": "Sketch drawings capture the salient information of visual concepts. Previous work has shown that neural networks are capable of producing sketches of natural objects drawn from a small number of classes. While earlier approaches focus on generation quality or retrieval, we explore properties of image representations learned by training a model to produce sketches of images. 
We show that this generative, class-agnostic model produces informative embeddings of images from novel examples, classes, and even novel datasets in a few-shot setting. Additionally, we find that these learned representations exhibit interesting structure and compositionality.", "bibtex": "@InProceedings{pmlr-v139-wang21s,\n title = \t {SketchEmbedNet: Learning Novel Concepts by Imitating Drawings},\n author = {Wang, Alexander and Ren, Mengye and Zemel, Richard},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10870--10881},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21s/wang21s.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21s.html},\n abstract = \t {Sketch drawings capture the salient information of visual concepts. Previous work has shown that neural networks are capable of producing sketches of natural objects drawn from a small number of classes. While earlier approaches focus on generation quality or retrieval, we explore properties of image representations learned by training a model to produce sketches of images. We show that this generative, class-agnostic model produces informative embeddings of images from novel examples, classes, and even novel datasets in a few-shot setting. Additionally, we find that these learned representations exhibit interesting structure and compositionality.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21s/wang21s.pdf", "supp": "", "pdf_size": 2778388, "gs_citation": 34, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8136668361593221793&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "University of Toronto + Vector Institute + CIFAR; University of Toronto + Vector Institute + CIFAR; University of Toronto + Vector Institute + CIFAR", "aff_domain": "cs.toronto.edu;cs.toronto.edu;cs.toronto.edu", "email": "cs.toronto.edu;cs.toronto.edu;cs.toronto.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/wang21s.html", "aff_unique_index": "0+1+2;0+1+2;0+1+2", "aff_unique_norm": "University of Toronto;Vector Institute;Canadian Institute for Advanced Research", "aff_unique_dep": ";;", "aff_unique_url": "https://www.utoronto.ca;https://vectorinstitute.ai/;https://www.cifar.ca", "aff_unique_abbr": "U of T;Vector Institute;CIFAR", "aff_campus_unique_index": ";;", "aff_campus_unique": "", "aff_country_unique_index": "0+0+0;0+0+0;0+0+0", "aff_country_unique": "Canada" }, { "title": "Skew Orthogonal Convolutions", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9675", "id": "9675", "proceeding": "http://proceedings.mlr.press/v139/singla21a.html", "slides": "", "author_site": "Sahil Singla, Soheil Feizi", "author": "Sahil Singla; Soheil Feizi", "abstract": "Training convolutional neural networks with a Lipschitz constraint under the $l_{2}$ norm is useful for provable adversarial robustness, interpretable gradients, stable training, etc. While 1-Lipschitz networks can be designed by imposing a 1-Lipschitz constraint on each layer, training such networks requires each layer to be gradient norm preserving (GNP) to prevent gradients from vanishing. 
However, existing GNP convolutions suffer from slow training, lead to significant reduction in accuracy and provide no guarantees on their approximations. In this work, we propose a GNP convolution layer called \\textbf{S}kew \\textbf{O}rthogonal \\textbf{C}onvolution (SOC) that uses the following mathematical property: when a matrix is {\\it Skew-Symmetric}, its exponential function is an {\\it orthogonal} matrix. To use this property, we first construct a convolution filter whose Jacobian is Skew-Symmetric. Then, we use the Taylor series expansion of the Jacobian exponential to construct the SOC layer that is orthogonal. To efficiently implement SOC, we keep a finite number of terms from the Taylor series and provide a provable guarantee on the approximation error. Our experiments on CIFAR-10 and CIFAR-100 show that SOC allows us to train provably Lipschitz, large convolutional neural networks significantly faster than prior works while achieving significant improvements for both standard and certified robust accuracies.", "bibtex": "@InProceedings{pmlr-v139-singla21a,\n title = \t {Skew Orthogonal Convolutions},\n author = {Singla, Sahil and Feizi, Soheil},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9756--9766},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/singla21a/singla21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/singla21a.html},\n abstract = \t {Training convolutional neural networks with a Lipschitz constraint under the $l_{2}$ norm is useful for provable adversarial robustness, interpretable gradients, stable training, etc. While 1-Lipschitz networks can be designed by imposing a 1-Lipschitz constraint on each layer, training such networks requires each layer to be gradient norm preserving (GNP) to prevent gradients from vanishing. However, existing GNP convolutions suffer from slow training, lead to significant reduction in accuracy and provide no guarantees on their approximations. In this work, we propose a GNP convolution layer called \\textbf{S}kew \\textbf{O}rthogonal \\textbf{C}onvolution (SOC) that uses the following mathematical property: when a matrix is {\\it Skew-Symmetric}, its exponential function is an {\\it orthogonal} matrix. To use this property, we first construct a convolution filter whose Jacobian is Skew-Symmetric. Then, we use the Taylor series expansion of the Jacobian exponential to construct the SOC layer that is orthogonal. To efficiently implement SOC, we keep a finite number of terms from the Taylor series and provide a provable guarantee on the approximation error. 
Our experiments on CIFAR-10 and CIFAR-100 show that SOC allows us to train provably Lipschitz, large convolutional neural networks significantly faster than prior works while achieving significant improvements for both standard and certified robust accuracies.}\n}", "pdf": "http://proceedings.mlr.press/v139/singla21a/singla21a.pdf", "supp": "", "pdf_size": 1163946, "gs_citation": 82, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17464482494309423430&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, University of Maryland, College Park; Department of Computer Science, University of Maryland, College Park", "aff_domain": "umd.edu; ", "email": "umd.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/singla21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Maryland, College Park", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www/umd.edu", "aff_unique_abbr": "UMD", "aff_campus_unique_index": "0;0", "aff_campus_unique": "College Park", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Skill Discovery for Exploration and Planning using Deep Skill Graphs", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9497", "id": "9497", "proceeding": "http://proceedings.mlr.press/v139/bagaria21a.html", "slides": "/media/icml-2021/Slides/9497.pdf", "author_site": "Akhil Bagaria, Jason Senthil, George Konidaris", "author": "Akhil Bagaria; Jason K Senthil; George Konidaris", "abstract": "We introduce a new skill-discovery algorithm that builds a discrete graph representation of large continuous MDPs, where nodes correspond to skill subgoals and the edges to skill policies. The agent constructs this graph during an unsupervised training phase where it interleaves discovering skills and planning using them to gain coverage over ever-increasing portions of the state-space. Given a novel goal at test time, the agent plans with the acquired skill graph to reach a nearby state, then switches to learning to reach the goal. We show that the resulting algorithm, Deep Skill Graphs, outperforms both flat and existing hierarchical reinforcement learning methods on four difficult continuous control tasks.", "bibtex": "@InProceedings{pmlr-v139-bagaria21a,\n title = \t {Skill Discovery for Exploration and Planning using Deep Skill Graphs},\n author = {Bagaria, Akhil and Senthil, Jason K and Konidaris, George},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {521--531},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bagaria21a/bagaria21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bagaria21a.html},\n abstract = \t {We introduce a new skill-discovery algorithm that builds a discrete graph representation of large continuous MDPs, where nodes correspond to skill subgoals and the edges to skill policies. The agent constructs this graph during an unsupervised training phase where it interleaves discovering skills and planning using them to gain coverage over ever-increasing portions of the state-space. Given a novel goal at test time, the agent plans with the acquired skill graph to reach a nearby state, then switches to learning to reach the goal. 
We show that the resulting algorithm, Deep Skill Graphs, outperforms both flat and existing hierarchical reinforcement learning methods on four difficult continuous control tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/bagaria21a/bagaria21a.pdf", "supp": "", "pdf_size": 4091237, "gs_citation": 64, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11687669156452304000&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Department of Computer Science, Brown University; Department of Computer Science, Brown University; Department of Computer Science, Brown University", "aff_domain": "brown.edu; ; ", "email": "brown.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/bagaria21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Brown University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.brown.edu", "aff_unique_abbr": "Brown", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Sliced Iterative Normalizing Flows", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9119", "id": "9119", "proceeding": "http://proceedings.mlr.press/v139/dai21a.html", "slides": "", "author_site": "Biwei Dai, Uros Seljak", "author": "Biwei Dai; Uros Seljak", "abstract": "We develop an iterative (greedy) deep learning (DL) algorithm which is able to transform an arbitrary probability distribution function (PDF) into the target PDF. The model is based on iterative Optimal Transport of a series of 1D slices, matching on each slice the marginal PDF to the target. The axes of the orthogonal slices are chosen to maximize the PDF difference using Wasserstein distance at each iteration, which enables the algorithm to scale well to high dimensions. As special cases of this algorithm, we introduce two sliced iterative Normalizing Flow (SINF) models, which map from the data to the latent space (GIS) and vice versa (SIG). We show that SIG is able to generate high quality samples of image datasets, which match the GAN benchmarks, while GIS obtains competitive results on density estimation tasks compared to the density trained NFs, and is more stable, faster, and achieves higher p(x) when trained on small training sets. SINF approach deviates significantly from the current DL paradigm, as it is greedy and does not use concepts such as mini-batching, stochastic gradient descent and gradient back-propagation through deep layers.", "bibtex": "@InProceedings{pmlr-v139-dai21a,\n title = \t {Sliced Iterative Normalizing Flows},\n author = {Dai, Biwei and Seljak, Uros},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2352--2364},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/dai21a/dai21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/dai21a.html},\n abstract = \t {We develop an iterative (greedy) deep learning (DL) algorithm which is able to transform an arbitrary probability distribution function (PDF) into the target PDF. The model is based on iterative Optimal Transport of a series of 1D slices, matching on each slice the marginal PDF to the target. 
The axes of the orthogonal slices are chosen to maximize the PDF difference using Wasserstein distance at each iteration, which enables the algorithm to scale well to high dimensions. As special cases of this algorithm, we introduce two sliced iterative Normalizing Flow (SINF) models, which map from the data to the latent space (GIS) and vice versa (SIG). We show that SIG is able to generate high quality samples of image datasets, which match the GAN benchmarks, while GIS obtains competitive results on density estimation tasks compared to the density trained NFs, and is more stable, faster, and achieves higher p(x) when trained on small training sets. SINF approach deviates significantly from the current DL paradigm, as it is greedy and does not use concepts such as mini-batching, stochastic gradient descent and gradient back-propagation through deep layers.}\n}", "pdf": "http://proceedings.mlr.press/v139/dai21a/dai21a.pdf", "supp": "", "pdf_size": 7783919, "gs_citation": 50, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2467748158069488227&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Physics, University of California, Berkeley, California, USA+Lawrence Berkeley National Laboratory, Berkeley, California, USA; Department of Physics, University of California, Berkeley, California, USA+Lawrence Berkeley National Laboratory, Berkeley, California, USA", "aff_domain": "berkeley.edu; ", "email": "berkeley.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/dai21a.html", "aff_unique_index": "0+1;0+1", "aff_unique_norm": "University of California, Berkeley;Lawrence Berkeley National Laboratory", "aff_unique_dep": "Department of Physics;", "aff_unique_url": "https://www.berkeley.edu;https://www.lbl.gov", "aff_unique_abbr": "UC Berkeley;LBL", "aff_campus_unique_index": "0+0;0+0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "United States" }, { "title": "Slot Machines: Discovering Winning Combinations of Random Weights in Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9155", "id": "9155", "proceeding": "http://proceedings.mlr.press/v139/aladago21a.html", "slides": "/media/icml-2021/Slides/9155.pdf", "author_site": "Maxwell M Aladago, Lorenzo Torresani", "author": "Maxwell M Aladago; Lorenzo Torresani", "abstract": "In contrast to traditional weight optimization in a continuous space, we demonstrate the existence of effective random networks whose weights are never updated. By selecting a weight among a fixed set of random values for each individual connection, our method uncovers combinations of random weights that match the performance of traditionally-trained networks of the same capacity. We refer to our networks as \"slot machines\" where each reel (connection) contains a fixed set of symbols (random values). Our backpropagation algorithm \"spins\" the reels to seek \"winning\" combinations, i.e., selections of random weight values that minimize the given loss. Quite surprisingly, we find that allocating just a few random values to each connection (e.g., 8 values per connection) yields highly competitive combinations despite being dramatically more constrained compared to traditionally learned weights. Moreover, finetuning these combinations often improves performance over the trained baselines. 
A randomly initialized VGG-19 with 8 values per connection contains a combination that achieves 91% test accuracy on CIFAR-10. Our method also achieves an impressive performance of 98.2% on MNIST for neural networks containing only random weights.", "bibtex": "@InProceedings{pmlr-v139-aladago21a,\n title = \t {Slot Machines: Discovering Winning Combinations of Random Weights in Neural Networks},\n author = {Aladago, Maxwell M and Torresani, Lorenzo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {163--174},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/aladago21a/aladago21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/aladago21a.html},\n abstract = \t {In contrast to traditional weight optimization in a continuous space, we demonstrate the existence of effective random networks whose weights are never updated. By selecting a weight among a fixed set of random values for each individual connection, our method uncovers combinations of random weights that match the performance of traditionally-trained networks of the same capacity. We refer to our networks as \"slot machines\" where each reel (connection) contains a fixed set of symbols (random values). Our backpropagation algorithm \"spins\" the reels to seek \"winning\" combinations, i.e., selections of random weight values that minimize the given loss. Quite surprisingly, we find that allocating just a few random values to each connection (e.g., 8 values per connection) yields highly competitive combinations despite being dramatically more constrained compared to traditionally learned weights. Moreover, finetuning these combinations often improves performance over the trained baselines. A randomly initialized VGG-19 with 8 values per connection contains a combination that achieves 91% test accuracy on CIFAR-10. 
Our method also achieves an impressive performance of 98.2% on MNIST for neural networks containing only random weights.}\n}", "pdf": "http://proceedings.mlr.press/v139/aladago21a/aladago21a.pdf", "supp": "", "pdf_size": 1418144, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18038275193294463309&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, Dartmouth College; Department of Computer Science, Dartmouth College", "aff_domain": "dartmouth.edu; ", "email": "dartmouth.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/aladago21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Dartmouth College", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://dartmouth.edu", "aff_unique_abbr": "Dartmouth", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Smooth $p$-Wasserstein Distance: Structure, Empirical Approximation, and Statistical Applications", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9437", "id": "9437", "proceeding": "http://proceedings.mlr.press/v139/nietert21a.html", "slides": "", "author_site": "Sloan Nietert, Ziv Goldfeld, Kengo Kato", "author": "Sloan Nietert; Ziv Goldfeld; Kengo Kato", "abstract": "Discrepancy measures between probability distributions, often termed statistical distances, are ubiquitous in probability theory, statistics and machine learning. To combat the curse of dimensionality when estimating these distances from data, recent work has proposed smoothing out local irregularities in the measured distributions via convolution with a Gaussian kernel. Motivated by the scalability of this framework to high dimensions, we investigate the structural and statistical behavior of the Gaussian-smoothed $p$-Wasserstein distance $\\mathsf{W}_p^{(\\sigma)}$, for arbitrary $p\\geq 1$. After establishing basic metric and topological properties of $\\mathsf{W}_p^{(\\sigma)}$, we explore the asymptotic statistical properties of $\\mathsf{W}_p^{(\\sigma)}(\\hat{\\mu}_n,\\mu)$, where $\\hat{\\mu}_n$ is the empirical distribution of $n$ independent observations from $\\mu$. We prove that $\\mathsf{W}_p^{(\\sigma)}$ enjoys a parametric empirical convergence rate of $n^{-1/2}$, which contrasts the $n^{-1/d}$ rate for unsmoothed $\\Wp$ when $d \\geq 3$. Our proof relies on controlling $\\mathsf{W}_p^{(\\sigma)}$ by a $p$th-order smooth Sobolev distance $\\mathsf{d}_p^{(\\sigma)}$ and deriving the limit distribution of $\\sqrt{n}\\,\\mathsf{d}_p^{(\\sigma)}(\\hat{\\mu}_n,\\mu)$ for all dimensions $d$. 
As applications, we provide asymptotic guarantees for two-sample testing and minimum distance estimation using $\\mathsf{W}_p^{(\\sigma)}$, with experiments for $p=2$ using a maximum mean discrepancy formulation\u00a0of\u00a0$\\mathsf{d}_2^{(\\sigma)}$.", "bibtex": "@InProceedings{pmlr-v139-nietert21a,\n title = \t {Smooth $p$-Wasserstein Distance: Structure, Empirical Approximation, and Statistical Applications},\n author = {Nietert, Sloan and Goldfeld, Ziv and Kato, Kengo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8172--8183},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nietert21a/nietert21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/nietert21a.html},\n abstract = \t {Discrepancy measures between probability distributions, often termed statistical distances, are ubiquitous in probability theory, statistics and machine learning. To combat the curse of dimensionality when estimating these distances from data, recent work has proposed smoothing out local irregularities in the measured distributions via convolution with a Gaussian kernel. Motivated by the scalability of this framework to high dimensions, we investigate the structural and statistical behavior of the Gaussian-smoothed $p$-Wasserstein distance $\\mathsf{W}_p^{(\\sigma)}$, for arbitrary $p\\geq 1$. After establishing basic metric and topological properties of $\\mathsf{W}_p^{(\\sigma)}$, we explore the asymptotic statistical properties of $\\mathsf{W}_p^{(\\sigma)}(\\hat{\\mu}_n,\\mu)$, where $\\hat{\\mu}_n$ is the empirical distribution of $n$ independent observations from $\\mu$. We prove that $\\mathsf{W}_p^{(\\sigma)}$ enjoys a parametric empirical convergence rate of $n^{-1/2}$, which contrasts the $n^{-1/d}$ rate for unsmoothed $\\Wp$ when $d \\geq 3$. Our proof relies on controlling $\\mathsf{W}_p^{(\\sigma)}$ by a $p$th-order smooth Sobolev distance $\\mathsf{d}_p^{(\\sigma)}$ and deriving the limit distribution of $\\sqrt{n}\\,\\mathsf{d}_p^{(\\sigma)}(\\hat{\\mu}_n,\\mu)$ for all dimensions $d$. 
As applications, we provide asymptotic guarantees for two-sample testing and minimum distance estimation using $\\mathsf{W}_p^{(\\sigma)}$, with experiments for $p=2$ using a maximum mean discrepancy formulation\u00a0of\u00a0$\\mathsf{d}_2^{(\\sigma)}$.}\n}", "pdf": "http://proceedings.mlr.press/v139/nietert21a/nietert21a.pdf", "supp": "", "pdf_size": 4058239, "gs_citation": 41, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2656744074285717055&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science, Cornell University, Ithaca, NY; School of Electrical and Computer Engineering, Cornell University, Ithaca, NY; Department of Statistics and Data Science, Cornell University, Ithaca, NY", "aff_domain": "cornell.edu; ; ", "email": "cornell.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/nietert21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Cornell University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.cornell.edu", "aff_unique_abbr": "Cornell", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Ithaca", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Soft then Hard: Rethinking the Quantization in Neural Image Compression", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8433", "id": "8433", "proceeding": "http://proceedings.mlr.press/v139/guo21c.html", "slides": "/media/icml-2021/Slides/8433.pdf", "author_site": "Zongyu Guo, Zhizheng Zhang, Runsen Feng, Zhibo Chen", "author": "Zongyu Guo; Zhizheng Zhang; Runsen Feng; Zhibo Chen", "abstract": "Quantization is one of the core components in lossy image compression. For neural image compression, end-to-end optimization requires differentiable approximations of quantization, which can generally be grouped into three categories: additive uniform noise, straight-through estimator and soft-to-hard annealing. Training with additive uniform noise approximates the quantization error variationally but suffers from the train-test mismatch. The other two methods do not encounter this mismatch but, as shown in this paper, hurt the rate-distortion performance since the latent representation ability is weakened. We thus propose a novel soft-then-hard quantization strategy for neural image compression that first learns an expressive latent space softly, then closes the train-test mismatch with hard quantization. In addition, beyond the fixed integer-quantization, we apply scaled additive uniform noise to adaptively control the quantization granularity by deriving a new variational upper bound on actual rate. 
Experiments demonstrate that our proposed methods are easy to adopt, stable to train, and highly effective especially on complex compression models.", "bibtex": "@InProceedings{pmlr-v139-guo21c,\n title = \t {Soft then Hard: Rethinking the Quantization in Neural Image Compression},\n author = {Guo, Zongyu and Zhang, Zhizheng and Feng, Runsen and Chen, Zhibo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3920--3929},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/guo21c/guo21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/guo21c.html},\n abstract = \t {Quantization is one of the core components in lossy image compression. For neural image compression, end-to-end optimization requires differentiable approximations of quantization, which can generally be grouped into three categories: additive uniform noise, straight-through estimator and soft-to-hard annealing. Training with additive uniform noise approximates the quantization error variationally but suffers from the train-test mismatch. The other two methods do not encounter this mismatch but, as shown in this paper, hurt the rate-distortion performance since the latent representation ability is weakened. We thus propose a novel soft-then-hard quantization strategy for neural image compression that first learns an expressive latent space softly, then closes the train-test mismatch with hard quantization. In addition, beyond the fixed integer-quantization, we apply scaled additive uniform noise to adaptively control the quantization granularity by deriving a new variational upper bound on actual rate. 
Experiments demonstrate that our proposed methods are easy to adopt, stable to train, and highly effective especially on complex compression models.}\n}", "pdf": "http://proceedings.mlr.press/v139/guo21c/guo21c.pdf", "supp": "", "pdf_size": 4689246, "gs_citation": 92, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17397522208286627287&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 6, "aff": "University of Science and Technology of China; University of Science and Technology of China; University of Science and Technology of China; University of Science and Technology of China", "aff_domain": "mail.ustc.edu.cn; ; ;ustc.edu.cn", "email": "mail.ustc.edu.cn; ; ;ustc.edu.cn", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/guo21c.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Science and Technology of China", "aff_unique_dep": "", "aff_unique_url": "http://www.ustc.edu.cn", "aff_unique_abbr": "USTC", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "China" }, { "title": "Solving Challenging Dexterous Manipulation Tasks With Trajectory Optimisation and Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8517", "id": "8517", "proceeding": "http://proceedings.mlr.press/v139/charlesworth21a.html", "slides": "", "author_site": "Henry Charlesworth, Giovanni Montana", "author": "Henry J Charlesworth; Giovanni Montana", "abstract": "Training agents to autonomously control anthropomorphic robotic hands has the potential to lead to systems capable of performing a multitude of complex manipulation tasks in unstructured and uncertain environments. In this work, we first introduce a suite of challenging simulated manipulation tasks where current reinforcement learning and trajectory optimisation techniques perform poorly. These include environments where two simulated hands have to pass or throw objects between each other, as well as an environment where the agent must learn to spin a long pen between its fingers. We then introduce a simple trajectory optimisation algorithm that performs significantly better than existing methods on these environments. Finally, on the most challenging \u201cPenSpin\" task, we combine sub-optimal demonstrations generated through trajectory optimisation with off-policy reinforcement learning, obtaining performance that far exceeds either of these approaches individually. 
Videos of all of our results are available at: https://dexterous-manipulation.github.io", "bibtex": "@InProceedings{pmlr-v139-charlesworth21a,\n title = \t {Solving Challenging Dexterous Manipulation Tasks With Trajectory Optimisation and Reinforcement Learning},\n author = {Charlesworth, Henry J and Montana, Giovanni},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1496--1506},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/charlesworth21a/charlesworth21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/charlesworth21a.html},\n abstract = \t {Training agents to autonomously control anthropomorphic robotic hands has the potential to lead to systems capable of performing a multitude of complex manipulation tasks in unstructured and uncertain environments. In this work, we first introduce a suite of challenging simulated manipulation tasks where current reinforcement learning and trajectory optimisation techniques perform poorly. These include environments where two simulated hands have to pass or throw objects between each other, as well as an environment where the agent must learn to spin a long pen between its fingers. We then introduce a simple trajectory optimisation algorithm that performs significantly better than existing methods on these environments. Finally, on the most challenging \u201cPenSpin\" task, we combine sub-optimal demonstrations generated through trajectory optimisation with off-policy reinforcement learning, obtaining performance that far exceeds either of these approaches individually. Videos of all of our results are available at: https://dexterous-manipulation.github.io}\n}", "pdf": "http://proceedings.mlr.press/v139/charlesworth21a/charlesworth21a.pdf", "supp": "", "pdf_size": 1304189, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12065151754408475895&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Warwick Manufacturing Group, University of Warwick, Coventry, United Kingdom; Warwick Manufacturing Group, University of Warwick, Coventry, United Kingdom", "aff_domain": "warwick.ac.uk;warwick.ac.uk", "email": "warwick.ac.uk;warwick.ac.uk", "github": "", "project": "https://dexterous-manipulation.github.io/", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/charlesworth21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Warwick", "aff_unique_dep": "Warwick Manufacturing Group", "aff_unique_url": "https://www.warwick.ac.uk", "aff_unique_abbr": "Warwick", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Coventry", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "title": "Solving Inverse Problems with a Flow-based Noise Model", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9451", "id": "9451", "proceeding": "http://proceedings.mlr.press/v139/whang21a.html", "slides": "", "author_site": "Jay Whang, Qi Lei, Alexandros Dimakis", "author": "Jay Whang; Qi Lei; Alex Dimakis", "abstract": "We study image inverse problems with a normalizing flow prior. Our formulation views the solution as the maximum a posteriori estimate of the image conditioned on the measurements. 
This formulation allows us to use noise models with arbitrary dependencies as well as non-linear forward operators. We empirically validate the efficacy of our method on various inverse problems, including compressed sensing with quantized measurements and denoising with highly structured noise patterns. We also present initial theoretical recovery guarantees for solving inverse problems with a flow prior.", "bibtex": "@InProceedings{pmlr-v139-whang21a,\n title = \t {Solving Inverse Problems with a Flow-based Noise Model},\n author = {Whang, Jay and Lei, Qi and Dimakis, Alex},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11146--11157},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/whang21a/whang21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/whang21a.html},\n abstract = \t {We study image inverse problems with a normalizing flow prior. Our formulation views the solution as the maximum a posteriori estimate of the image conditioned on the measurements. This formulation allows us to use noise models with arbitrary dependencies as well as non-linear forward operators. We empirically validate the efficacy of our method on various inverse problems, including compressed sensing with quantized measurements and denoising with highly structured noise patterns. We also present initial theoretical recovery guarantees for solving inverse problems with a flow prior.}\n}", "pdf": "http://proceedings.mlr.press/v139/whang21a/whang21a.pdf", "supp": "", "pdf_size": 863264, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5823296642317647105&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 8, "aff": "Dept. of Computer Science, UT Austin, TX, USA; Dept. of Electrical and Computer Engineering, Princeton University, NJ, USA; Dept. of Electrical and Computer Engineering, UT Austin, TX, USA", "aff_domain": "utexas.edu; ; ", "email": "utexas.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/whang21a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Texas at Austin;Princeton University", "aff_unique_dep": "Department of Computer Science;Dept. of Electrical and Computer Engineering", "aff_unique_url": "https://www.utexas.edu;https://www.princeton.edu", "aff_unique_abbr": "UT Austin;Princeton", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Austin;Princeton", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Solving high-dimensional parabolic PDEs using the tensor train format", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9927", "id": "9927", "proceeding": "http://proceedings.mlr.press/v139/richter21a.html", "slides": "", "author_site": "Lorenz Richter, Leon Sallandt, Nikolas N\u00fcsken", "author": "Lorenz Richter; Leon Sallandt; Nikolas N\u00fcsken", "abstract": "High-dimensional partial differential equations (PDEs) are ubiquitous in economics, science and engineering. However, their numerical treatment poses formidable challenges since traditional grid-based methods tend to be frustrated by the curse of dimensionality. 
In this paper, we argue that tensor trains provide an appealing approximation framework for parabolic PDEs: the combination of reformulations in terms of backward stochastic differential equations and regression-type methods in the tensor format holds the promise of leveraging latent low-rank structures enabling both compression and efficient computation. Following this paradigm, we develop novel iterative schemes, involving either explicit and fast or implicit and accurate updates. We demonstrate in a number of examples that our methods achieve a favorable trade-off between accuracy and computational efficiency in comparison with state-of-the-art neural network based approaches.", "bibtex": "@InProceedings{pmlr-v139-richter21a,\n title = \t {Solving high-dimensional parabolic PDEs using the tensor train format},\n author = {Richter, Lorenz and Sallandt, Leon and N{\\\"u}sken, Nikolas},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8998--9009},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/richter21a/richter21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/richter21a.html},\n abstract = \t {High-dimensional partial differential equations (PDEs) are ubiquitous in economics, science and engineering. However, their numerical treatment poses formidable challenges since traditional grid-based methods tend to be frustrated by the curse of dimensionality. In this paper, we argue that tensor trains provide an appealing approximation framework for parabolic PDEs: the combination of reformulations in terms of backward stochastic differential equations and regression-type methods in the tensor format holds the promise of leveraging latent low-rank structures enabling both compression and efficient computation. Following this paradigm, we develop novel iterative schemes, involving either explicit and fast or implicit and accurate updates. 
We demonstrate in a number of examples that our methods achieve a favorable trade-off between accuracy and computational efficiency in comparison with state-of-the-art neural network based approaches.}\n}", "pdf": "http://proceedings.mlr.press/v139/richter21a/richter21a.pdf", "supp": "", "pdf_size": 2966795, "gs_citation": 68, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11792660313798176886&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Freie Universit\u00e4t Berlin, Germany+BTU Cottbus-Senftenberg, Germany+dida Datenschmiede GmbH, Germany; Technische Universit\u00e4t Berlin, Germany; Universit\u00e4t Potsdam, Germany", "aff_domain": "fu-berlin.de;math.tu-berlin.de; ", "email": "fu-berlin.de;math.tu-berlin.de; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/richter21a.html", "aff_unique_index": "0+1+2;3;4", "aff_unique_norm": "Freie Universit\u00e4t Berlin;Brandenburg University of Technology Cottbus-Senftenberg;dida Datenschmiede GmbH;Technische Universit\u00e4t Berlin;University of Potsdam", "aff_unique_dep": ";;;;", "aff_unique_url": "https://www.fu-berlin.de;https://www.btu-cottbus.de;;https://www.tu-berlin.de;https://www.uni-potsdam.de", "aff_unique_abbr": "FU Berlin;BTU;;TU Berlin;UP", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+0+0;0;0", "aff_country_unique": "Germany" }, { "title": "SoundDet: Polyphonic Moving Sound Event Detection and Localization from Raw Waveform", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8603", "id": "8603", "proceeding": "http://proceedings.mlr.press/v139/he21b.html", "slides": "", "author_site": "Yuhang He, Niki Trigoni, Andrew Markham", "author": "Yuhang He; Niki Trigoni; Andrew Markham", "abstract": "We present a new framework SoundDet, which is an end-to-end trainable and light-weight framework, for polyphonic moving sound event detection and localization. Prior methods typically approach this problem by preprocessing raw waveform into time-frequency representations, which is more amenable to process with well-established image processing pipelines. Prior methods also detect in segment-wise manner, leading to incomplete and partial detections. SoundDet takes a novel approach and directly consumes the raw, multichannel waveform and treats the spatio-temporal sound event as a complete \u201csound-object\" to be detected. Specifically, SoundDet consists of a backbone neural network and two parallel heads for temporal detection and spatial localization, respectively. Given the large sampling rate of raw waveform, the backbone network first learns a set of phase-sensitive and frequency-selective bank of filters to explicitly retain direction-of-arrival information, whilst being highly computationally and parametrically efficient than standard 1D/2D convolution. A dense sound event proposal map is then constructed to handle the challenges of predicting events with large varying temporal duration. Accompanying the dense proposal map are a temporal overlapness map and a motion smoothness map that measure a proposal\u2019s confidence to be an event from temporal detection accuracy and movement consistency perspective. Involving the two maps guarantees SoundDet to be trained in a spatio-temporally unified manner. 
Experimental results on the public DCASE dataset show the advantage of SoundDet on both segment-based evaluation and our newly proposed event-based evaluation system.", "bibtex": "@InProceedings{pmlr-v139-he21b,\n title = \t {SoundDet: Polyphonic Moving Sound Event Detection and Localization from Raw Waveform},\n author = {He, Yuhang and Trigoni, Niki and Markham, Andrew},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4160--4170},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/he21b/he21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/he21b.html},\n abstract = \t {We present a new framework SoundDet, which is an end-to-end trainable and light-weight framework, for polyphonic moving sound event detection and localization. Prior methods typically approach this problem by preprocessing raw waveform into time-frequency representations, which is more amenable to process with well-established image processing pipelines. Prior methods also detect in segment-wise manner, leading to incomplete and partial detections. SoundDet takes a novel approach and directly consumes the raw, multichannel waveform and treats the spatio-temporal sound event as a complete \u201csound-object\" to be detected. Specifically, SoundDet consists of a backbone neural network and two parallel heads for temporal detection and spatial localization, respectively. Given the large sampling rate of raw waveform, the backbone network first learns a set of phase-sensitive and frequency-selective bank of filters to explicitly retain direction-of-arrival information, whilst being highly computationally and parametrically efficient than standard 1D/2D convolution. A dense sound event proposal map is then constructed to handle the challenges of predicting events with large varying temporal duration. Accompanying the dense proposal map are a temporal overlapness map and a motion smoothness map that measure a proposal\u2019s confidence to be an event from temporal detection accuracy and movement consistency perspective. Involving the two maps guarantees SoundDet to be trained in a spatio-temporally unified manner. 
Experimental results on the public DCASE dataset show the advantage of SoundDet on both segment-based evaluation and our newly proposed event-based evaluation system.}\n}", "pdf": "http://proceedings.mlr.press/v139/he21b/he21b.pdf", "supp": "", "pdf_size": 3214622, "gs_citation": 36, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17986954624058522857&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science, University of Oxford; Department of Computer Science, University of Oxford; Department of Computer Science, University of Oxford", "aff_domain": "cs.ox.ac.uk;cs.ox.ac.uk;cs.ox.ac.uk", "email": "cs.ox.ac.uk;cs.ox.ac.uk;cs.ox.ac.uk", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/he21b.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Oxford", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ox.ac.uk", "aff_unique_abbr": "Oxford", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Oxford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Sparse Bayesian Learning via Stepwise Regression", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9953", "id": "9953", "proceeding": "http://proceedings.mlr.press/v139/ament21a.html", "slides": "", "author_site": "Sebastian Ament, Carla Gomes", "author": "Sebastian E. Ament; Carla P. Gomes", "abstract": "Sparse Bayesian Learning (SBL) is a powerful framework for attaining sparsity in probabilistic models. Herein, we propose a coordinate ascent algorithm for SBL termed Relevance Matching Pursuit (RMP) and show that, as its noise variance parameter goes to zero, RMP exhibits a surprising connection to Stepwise Regression. Further, we derive novel guarantees for Stepwise Regression algorithms, which also shed light on RMP. Our guarantees for Forward Regression improve on deterministic and probabilistic results for Orthogonal Matching Pursuit with noise. Our analysis of Backward Regression culminates in a bound on the residual of the optimal solution to the subset selection problem that, if satisfied, guarantees the optimality of the result. To our knowledge, this bound is the first that can be computed in polynomial time and depends chiefly on the smallest singular value of the matrix. We report numerical experiments using a variety of feature selection algorithms. Notably, RMP and its limiting variant are both efficient and maintain strong performance with correlated features.", "bibtex": "@InProceedings{pmlr-v139-ament21a,\n title = \t {Sparse Bayesian Learning via Stepwise Regression},\n author = {Ament, Sebastian E. and Gomes, Carla P.},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {264--274},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ament21a/ament21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ament21a.html},\n abstract = \t {Sparse Bayesian Learning (SBL) is a powerful framework for attaining sparsity in probabilistic models. Herein, we propose a coordinate ascent algorithm for SBL termed Relevance Matching Pursuit (RMP) and show that, as its noise variance parameter goes to zero, RMP exhibits a surprising connection to Stepwise Regression. 
Further, we derive novel guarantees for Stepwise Regression algorithms, which also shed light on RMP. Our guarantees for Forward Regression improve on deterministic and probabilistic results for Orthogonal Matching Pursuit with noise. Our analysis of Backward Regression culminates in a bound on the residual of the optimal solution to the subset selection problem that, if satisfied, guarantees the optimality of the result. To our knowledge, this bound is the first that can be computed in polynomial time and depends chiefly on the smallest singular value of the matrix. We report numerical experiments using a variety of feature selection algorithms. Notably, RMP and its limiting variant are both efficient and maintain strong performance with correlated features.}\n}", "pdf": "http://proceedings.mlr.press/v139/ament21a/ament21a.pdf", "supp": "", "pdf_size": 820467, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14029385398750356286&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science, Cornell University; Department of Computer Science, Cornell University", "aff_domain": "cs.cornell.edu; ", "email": "cs.cornell.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/ament21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Cornell University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.cornell.edu", "aff_unique_abbr": "Cornell", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Sparse Feature Selection Makes Batch Reinforcement Learning More Sample Efficient", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8513", "id": "8513", "proceeding": "http://proceedings.mlr.press/v139/hao21a.html", "slides": "", "author_site": "Botao Hao, Yaqi Duan, Tor Lattimore, Csaba Szepesvari, Mengdi Wang", "author": "Botao Hao; Yaqi Duan; Tor Lattimore; Csaba Szepesvari; Mengdi Wang", "abstract": "This paper provides a statistical analysis of high-dimensional batch reinforcement learning (RL) using sparse linear function approximation. When there is a large number of candidate features, our result sheds light on the fact that sparsity-aware methods can make batch RL more sample efficient. We first consider the off-policy policy evaluation problem. To evaluate a new target policy, we analyze a Lasso fitted Q-evaluation method and establish a finite-sample error bound that has no polynomial dependence on the ambient dimension. To reduce the Lasso bias, we further propose a post model-selection estimator that applies fitted Q-evaluation to the features selected via group Lasso. Under an additional signal strength assumption, we derive a sharper instance-dependent error bound that depends on a divergence function measuring the distribution mismatch between the data distribution and occupancy measure of the target policy. Further, we study the Lasso fitted Q-iteration for batch policy optimization and establish a finite-sample error bound depending on the ratio between the number of relevant features and restricted minimal eigenvalue of the data\u2019s covariance. In the end, we complement the results with minimax lower bounds for batch-data policy evaluation/optimization that nearly match our upper bounds. 
The results suggest that having well-conditioned data is crucial for sparse batch policy learning.", "bibtex": "@InProceedings{pmlr-v139-hao21a,\n title = \t {Sparse Feature Selection Makes Batch Reinforcement Learning More Sample Efficient},\n author = {Hao, Botao and Duan, Yaqi and Lattimore, Tor and Szepesvari, Csaba and Wang, Mengdi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4063--4073},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hao21a/hao21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hao21a.html},\n abstract = \t {This paper provides a statistical analysis of high-dimensional batch reinforcement learning (RL) using sparse linear function approximation. When there is a large number of candidate features, our result sheds light on the fact that sparsity-aware methods can make batch RL more sample efficient. We first consider the off-policy policy evaluation problem. To evaluate a new target policy, we analyze a Lasso fitted Q-evaluation method and establish a finite-sample error bound that has no polynomial dependence on the ambient dimension. To reduce the Lasso bias, we further propose a post model-selection estimator that applies fitted Q-evaluation to the features selected via group Lasso. Under an additional signal strength assumption, we derive a sharper instance-dependent error bound that depends on a divergence function measuring the distribution mismatch between the data distribution and occupancy measure of the target policy. Further, we study the Lasso fitted Q-iteration for batch policy optimization and establish a finite-sample error bound depending on the ratio between the number of relevant features and restricted minimal eigenvalue of the data\u2019s covariance. In the end, we complement the results with minimax lower bounds for batch-data policy evaluation/optimization that nearly match our upper bounds. 
The results suggest that having well-conditioned data is crucial for sparse batch policy learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/hao21a/hao21a.pdf", "supp": "", "pdf_size": 388290, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2586341014497999120&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Deepmind; Princeton University; Deepmind; University of Alberta; Princeton University", "aff_domain": "gmail.com; ; ;princeton.edu; ", "email": "gmail.com; ; ;princeton.edu; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/hao21a.html", "aff_unique_index": "0;1;0;2;1", "aff_unique_norm": "DeepMind;Princeton University;University of Alberta", "aff_unique_dep": ";;", "aff_unique_url": "https://deepmind.com;https://www.princeton.edu;https://www.ualberta.ca", "aff_unique_abbr": "DeepMind;Princeton;UAlberta", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0;2;1", "aff_country_unique": "United Kingdom;United States;Canada" }, { "title": "Sparse and Imperceptible Adversarial Attack via a Homotopy Algorithm", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10035", "id": "10035", "proceeding": "http://proceedings.mlr.press/v139/zhu21a.html", "slides": "", "author_site": "Mingkang Zhu, Tianlong Chen, Zhangyang \u201cAtlas\u201d Wang", "author": "Mingkang Zhu; Tianlong Chen; Zhangyang Wang", "abstract": "Sparse adversarial attacks can fool deep neural networks (DNNs) by only perturbing a few pixels (regularized by $\\ell_0$ norm). Recent efforts combine it with an additional $\\ell_\\infty$ imperceptibility constraint on the perturbation magnitudes. The resultant sparse and imperceptible attacks are practically relevant, and indicate an even higher vulnerability of DNNs than we usually imagined. However, such attacks are more challenging to generate due to the optimization difficulty of coupling the $\\ell_0$ regularizer and box constraints with a non-convex objective. In this paper, we address this challenge by proposing a homotopy algorithm to jointly tackle the sparsity and the perturbation bound in one unified framework. At each iteration, the main step of our algorithm is to optimize an $\\ell_0$-regularized adversarial loss, by leveraging the nonmonotone Accelerated Proximal Gradient Method (nmAPG) for nonconvex programming; it is followed by an $\\ell_0$ change control step, and an optional post-attack step designed to escape bad local minima. We also extend the algorithm to handle the structural sparsity regularizer. We extensively examine the effectiveness of our proposed \textbf{homotopy attack} for both targeted and non-targeted attack scenarios, on CIFAR-10 and ImageNet datasets. Compared to state-of-the-art methods, our homotopy attack leads to significantly fewer perturbations, e.g., reducing them by 42.91% on CIFAR-10 and 75.03% on ImageNet (average case, targeted attack), at similar maximal perturbation magnitudes, while still achieving 100% attack success rates. 
Our codes are available at: {\\small\\url{https://github.com/VITA-Group/SparseADV_Homotopy}}.", "bibtex": "@InProceedings{pmlr-v139-zhu21a,\n title = \t {Sparse and Imperceptible Adversarial Attack via a Homotopy Algorithm},\n author = {Zhu, Mingkang and Chen, Tianlong and Wang, Zhangyang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12868--12877},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhu21a/zhu21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhu21a.html},\n abstract = \t {Sparse adversarial attacks can fool deep neural networks (DNNs) by only perturbing a few pixels (regularized by $\\ell_0$ norm). Recent efforts combine it with an additional $\\ell_\\infty$ imperceptibility constraint on the perturbation magnitudes. The resultant sparse and imperceptible attacks are practically relevant, and indicate an even higher vulnerability of DNNs than we usually imagined. However, such attacks are more challenging to generate due to the optimization difficulty of coupling the $\\ell_0$ regularizer and box constraints with a non-convex objective. In this paper, we address this challenge by proposing a homotopy algorithm to jointly tackle the sparsity and the perturbation bound in one unified framework. At each iteration, the main step of our algorithm is to optimize an $\\ell_0$-regularized adversarial loss, by leveraging the nonmonotone Accelerated Proximal Gradient Method (nmAPG) for nonconvex programming; it is followed by an $\\ell_0$ change control step, and an optional post-attack step designed to escape bad local minima. We also extend the algorithm to handle the structural sparsity regularizer. We extensively examine the effectiveness of our proposed \\textbf{homotopy attack} for both targeted and non-targeted attack scenarios, on CIFAR-10 and ImageNet datasets. Compared to state-of-the-art methods, our homotopy attack leads to significantly fewer perturbations, e.g., reducing them by 42.91% on CIFAR-10 and 75.03% on ImageNet (average case, targeted attack), at similar maximal perturbation magnitudes, while still achieving 100% attack success rates. 
Our codes are available at: {\\small\\url{https://github.com/VITA-Group/SparseADV_Homotopy}}.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhu21a/zhu21a.pdf", "supp": "", "pdf_size": 3362429, "gs_citation": 35, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18221995160833723432&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "The University of Texas at Austin, USA; The University of Texas at Austin, USA; The University of Texas at Austin, USA", "aff_domain": "utexas.edu;utexas.edu;utexas.edu", "email": "utexas.edu;utexas.edu;utexas.edu", "github": "https://github.com/VITA-Group/SparseADV_Homotopy", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zhu21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Texas at Austin", "aff_unique_dep": "", "aff_unique_url": "https://www.utexas.edu", "aff_unique_abbr": "UT Austin", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Austin", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Sparse within Sparse Gaussian Processes using Neighbor Information", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9153", "id": "9153", "proceeding": "http://proceedings.mlr.press/v139/tran21a.html", "slides": "", "author_site": "Gia-Lac Tran, Dimitrios Milios, Pietro Michiardi, Maurizio Filippone", "author": "Gia-Lac Tran; Dimitrios Milios; Pietro Michiardi; Maurizio Filippone", "abstract": "Approximations to Gaussian processes (GPs) based on inducing variables, combined with variational inference techniques, enable state-of-the-art sparse approaches to infer GPs at scale through mini-batch based learning. In this work, we further push the limits of scalability of sparse GPs by allowing large number of inducing variables without imposing a special structure on the inducing inputs. In particular, we introduce a novel hierarchical prior, which imposes sparsity on the set of inducing variables. We treat our model variationally, and we experimentally show considerable computational gains compared to standard sparse GPs when sparsity on the inducing variables is realized considering the nearest inducing inputs of a random mini-batch of the data. We perform an extensive experimental validation that demonstrates the effectiveness of our approach compared to the state-of-the-art. Our approach enables the possibility to use sparse GPs using a large number of inducing points without incurring a prohibitive computational cost.", "bibtex": "@InProceedings{pmlr-v139-tran21a,\n title = \t {Sparse within Sparse Gaussian Processes using Neighbor Information},\n author = {Tran, Gia-Lac and Milios, Dimitrios and Michiardi, Pietro and Filippone, Maurizio},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10369--10378},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tran21a/tran21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/tran21a.html},\n abstract = \t {Approximations to Gaussian processes (GPs) based on inducing variables, combined with variational inference techniques, enable state-of-the-art sparse approaches to infer GPs at scale through mini-batch based learning. 
In this work, we further push the limits of scalability of sparse GPs by allowing large number of inducing variables without imposing a special structure on the inducing inputs. In particular, we introduce a novel hierarchical prior, which imposes sparsity on the set of inducing variables. We treat our model variationally, and we experimentally show considerable computational gains compared to standard sparse GPs when sparsity on the inducing variables is realized considering the nearest inducing inputs of a random mini-batch of the data. We perform an extensive experimental validation that demonstrates the effectiveness of our approach compared to the state-of-the-art. Our approach enables the possibility to use sparse GPs using a large number of inducing points without incurring a prohibitive computational cost.}\n}", "pdf": "http://proceedings.mlr.press/v139/tran21a/tran21a.pdf", "supp": "", "pdf_size": 5078979, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8031448998837041241&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Data Science, Eurecom, France+Department of Computer Science, National University of Singapore, Singapore; Department of Data Science, Eurecom, France; Department of Data Science, Eurecom, France; Department of Data Science, Eurecom, France", "aff_domain": "nus.edu.sg; ; ; ", "email": "nus.edu.sg; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/tran21a.html", "aff_unique_index": "0+1;0;0;0", "aff_unique_norm": "EURECOM;National University of Singapore", "aff_unique_dep": "Department of Data Science;Department of Computer Science", "aff_unique_url": "https://www.eurecom.fr;https://www.nus.edu.sg", "aff_unique_abbr": ";NUS", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+1;0;0;0", "aff_country_unique": "France;Singapore" }, { "title": "SparseBERT: Rethinking the Importance Analysis in Self-attention", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9813", "id": "9813", "proceeding": "http://proceedings.mlr.press/v139/shi21a.html", "slides": "/media/icml-2021/Slides/9813.pdf", "author_site": "Han Shi, Jiahui Gao, Xiaozhe Ren, Hang Xu, Xiaodan Liang, Zhenguo Li, James Kwok", "author": "Han Shi; Jiahui Gao; Xiaozhe Ren; Hang Xu; Xiaodan Liang; Zhenguo Li; James Tin-Yau Kwok", "abstract": "Transformer-based models are popularly used in natural language processing (NLP). Its core component, self-attention, has aroused widespread interest. To understand the self-attention mechanism, a direct method is to visualize the attention map of a pre-trained model. Based on the patterns observed, a series of efficient Transformers with different sparse attention masks have been proposed. From a theoretical perspective, universal approximability of Transformer-based models is also recently proved. However, the above understanding and analysis of self-attention is based on a pre-trained model. To rethink the importance analysis in self-attention, we study the significance of different positions in attention matrix during pre-training. A surprising result is that diagonal elements in the attention map are the least important compared with other attention positions. We provide a proof showing that these diagonal elements can indeed be removed without deteriorating model performance. Furthermore, we propose a Differentiable Attention Mask (DAM) algorithm, which further guides the design of the SparseBERT. 
Extensive experiments verify our interesting findings and illustrate the effect of the proposed algorithm.", "bibtex": "@InProceedings{pmlr-v139-shi21a,\n title = \t {SparseBERT: Rethinking the Importance Analysis in Self-attention},\n author = {Shi, Han and Gao, Jiahui and Ren, Xiaozhe and Xu, Hang and Liang, Xiaodan and Li, Zhenguo and Kwok, James Tin-Yau},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9547--9557},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/shi21a/shi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/shi21a.html},\n abstract = \t {Transformer-based models are popularly used in natural language processing (NLP). Its core component, self-attention, has aroused widespread interest. To understand the self-attention mechanism, a direct method is to visualize the attention map of a pre-trained model. Based on the patterns observed, a series of efficient Transformers with different sparse attention masks have been proposed. From a theoretical perspective, universal approximability of Transformer-based models is also recently proved. However, the above understanding and analysis of self-attention is based on a pre-trained model. To rethink the importance analysis in self-attention, we study the significance of different positions in attention matrix during pre-training. A surprising result is that diagonal elements in the attention map are the least important compared with other attention positions. We provide a proof showing that these diagonal elements can indeed be removed without deteriorating model performance. Furthermore, we propose a Differentiable Attention Mask (DAM) algorithm, which further guides the design of the SparseBERT. 
Extensive experiments verify our interesting findings and illustrate the effect of the proposed algorithm.}\n}", "pdf": "http://proceedings.mlr.press/v139/shi21a/shi21a.pdf", "supp": "", "pdf_size": 2526840, "gs_citation": 57, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12872526059138861897&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Hong Kong University of Science and Technology, Hong Kong; The University of Hong Kong, Hong Kong; Huawei Noah\u2019s Ark Lab; Sun Yat-sen University, China; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Hong Kong University of Science and Technology, Hong Kong", "aff_domain": "cse.ust.hk; ; ; ; ; ; ", "email": "cse.ust.hk; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/shi21a.html", "aff_unique_index": "0;1;2;3;2;2;0", "aff_unique_norm": "Hong Kong University of Science and Technology;University of Hong Kong;Huawei;Sun Yat-sen University", "aff_unique_dep": ";;Noah\u2019s Ark Lab;", "aff_unique_url": "https://www.ust.hk;https://www.hku.hk;https://www.huawei.com;http://www.sysu.edu.cn", "aff_unique_abbr": "HKUST;HKU;Huawei;SYSU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Hong Kong SAR;", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "China" }, { "title": "Sparsifying Networks via Subdifferential Inclusion", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10671", "id": "10671", "proceeding": "http://proceedings.mlr.press/v139/verma21b.html", "slides": "/media/icml-2021/Slides/10671.pdf", "author_site": "Sagar Verma, Jean-Christophe Pesquet", "author": "Sagar Verma; Jean-Christophe Pesquet", "abstract": "Sparsifying deep neural networks is of paramount interest in many areas, especially when those networks have to be implemented on low-memory devices. In this article, we propose a new formulation of the problem of generating sparse weights for a pre-trained neural network. By leveraging the properties of standard nonlinear activation functions, we show that the problem is equivalent to an approximate subdifferential inclusion problem. The accuracy of the approximation controls the sparsity. We show that the proposed approach is valid for a broad class of activation functions (ReLU, sigmoid, softmax). We propose an iterative optimization algorithm to induce sparsity whose convergence is guaranteed. Because of the algorithm flexibility, the sparsity can be ensured from partial training data in a minibatch manner. 
To demonstrate the effectiveness of our method, we perform experiments on various networks in different applicative contexts: image classification, speech recognition, natural language processing, and time-series forecasting.", "bibtex": "@InProceedings{pmlr-v139-verma21b,\n title = \t {Sparsifying Networks via Subdifferential Inclusion},\n author = {Verma, Sagar and Pesquet, Jean-Christophe},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10542--10552},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/verma21b/verma21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/verma21b.html},\n abstract = \t {Sparsifying deep neural networks is of paramount interest in many areas, especially when those networks have to be implemented on low-memory devices. In this article, we propose a new formulation of the problem of generating sparse weights for a pre-trained neural network. By leveraging the properties of standard nonlinear activation functions, we show that the problem is equivalent to an approximate subdifferential inclusion problem. The accuracy of the approximation controls the sparsity. We show that the proposed approach is valid for a broad class of activation functions (ReLU, sigmoid, softmax). We propose an iterative optimization algorithm to induce sparsity whose convergence is guaranteed. Because of the algorithm flexibility, the sparsity can be ensured from partial training data in a minibatch manner. To demonstrate the effectiveness of our method, we perform experiments on various networks in different applicative contexts: image classification, speech recognition, natural language processing, and time-series forecasting.}\n}", "pdf": "http://proceedings.mlr.press/v139/verma21b/verma21b.pdf", "supp": "", "pdf_size": 513297, "gs_citation": 19, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4947715078779422431&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Universit\u00e9 Paris-Saclay, CentraleSup\u00e9lec, Inria, Centre de Vision Num\u00e9rique; Universit\u00e9 Paris-Saclay, CentraleSup\u00e9lec, Inria, Centre de Vision Num\u00e9rique", "aff_domain": "centralesupelec.fr; ", "email": "centralesupelec.fr; ", "github": "", "project": "https://sagarverma.github.io/compression", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/verma21b.html", "aff_unique_index": "0;0", "aff_unique_norm": "Universit\u00e9 Paris-Saclay", "aff_unique_dep": "", "aff_unique_url": "https://www.universite-paris-saclay.fr", "aff_unique_abbr": "UPS", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "France" }, { "title": "Sparsity-Agnostic Lasso Bandit", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9623", "id": "9623", "proceeding": "http://proceedings.mlr.press/v139/oh21a.html", "slides": "/media/icml-2021/Slides/9623.pdf", "author_site": "Min-hwan Oh, Garud Iyengar, Assaf Zeevi", "author": "Min-Hwan Oh; Garud Iyengar; Assaf Zeevi", "abstract": "We consider a stochastic contextual bandit problem where the dimension $d$ of the feature vectors is potentially large, however, only a sparse subset of features of cardinality $s_0 \\ll d$ affect the reward function. 
Essentially all existing algorithms for sparse bandits require a priori knowledge of the value of the sparsity index $s_0$. This knowledge is almost never available in practice, and misspecification of this parameter can lead to severe deterioration in the performance of existing methods. The main contribution of this paper is to propose an algorithm that does not require prior knowledge of the sparsity index $s_0$ and establish tight regret bounds on its performance under mild conditions. We also comprehensively evaluate our proposed algorithm numerically and show that it consistently outperforms existing methods, even when the correct sparsity index is revealed to them but is kept hidden from our algorithm.", "bibtex": "@InProceedings{pmlr-v139-oh21a,\n title = \t {Sparsity-Agnostic Lasso Bandit},\n author = {Oh, Min-Hwan and Iyengar, Garud and Zeevi, Assaf},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8271--8280},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/oh21a/oh21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/oh21a.html},\n abstract = \t {We consider a stochastic contextual bandit problem where the dimension $d$ of the feature vectors is potentially large, however, only a sparse subset of features of cardinality $s_0 \\ll d$ affect the reward function. Essentially all existing algorithms for sparse bandits require a priori knowledge of the value of the sparsity index $s_0$. This knowledge is almost never available in practice, and misspecification of this parameter can lead to severe deterioration in the performance of existing methods. The main contribution of this paper is to propose an algorithm that does not require prior knowledge of the sparsity index $s_0$ and establish tight regret bounds on its performance under mild conditions. 
We also comprehensively evaluate our proposed algorithm numerically and show that it consistently outperforms existing methods, even when the correct sparsity index is revealed to them but is kept hidden from our algorithm.}\n}", "pdf": "http://proceedings.mlr.press/v139/oh21a/oh21a.pdf", "supp": "", "pdf_size": 1285431, "gs_citation": 66, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15039056800751768549&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Seoul National University; Columbia University; Columbia University", "aff_domain": "snu.ac.kr; ; ", "email": "snu.ac.kr; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/oh21a.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "Seoul National University;Columbia University", "aff_unique_dep": ";", "aff_unique_url": "https://www.snu.ac.kr;https://www.columbia.edu", "aff_unique_abbr": "SNU;Columbia", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;1", "aff_country_unique": "South Korea;United States" }, { "title": "Spectral Normalisation for Deep Reinforcement Learning: An Optimisation Perspective", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9739", "id": "9739", "proceeding": "http://proceedings.mlr.press/v139/gogianu21a.html", "slides": "/media/icml-2021/Slides/9739.pdf", "author_site": "Florin Gogianu, Tudor Berariu, Mihaela Rosca, Claudia Clopath, Lucian Busoniu, Razvan Pascanu", "author": "Florin Gogianu; Tudor Berariu; Mihaela C Rosca; Claudia Clopath; Lucian Busoniu; Razvan Pascanu", "abstract": "Most of the recent deep reinforcement learning advances take an RL-centric perspective and focus on refinements of the training objective. We diverge from this view and show that we can recover the performance of these developments not by changing the objective, but by regularising the value-function estimator. Constraining the Lipschitz constant of a single layer using spectral normalisation is sufficient to elevate the performance of a Categorical-DQN agent to that of a more elaborate agent on the challenging Atari domain. We conduct ablation studies to disentangle the various effects normalisation has on the learning dynamics and show that it is sufficient to modulate the parameter updates to recover most of the performance of spectral normalisation. These findings hint towards the need to also focus on the neural component and its learning dynamics to tackle the peculiarities of Deep Reinforcement Learning.", "bibtex": "@InProceedings{pmlr-v139-gogianu21a,\n title = \t {Spectral Normalisation for Deep Reinforcement Learning: An Optimisation Perspective},\n author = {Gogianu, Florin and Berariu, Tudor and Rosca, Mihaela C and Clopath, Claudia and Busoniu, Lucian and Pascanu, Razvan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3734--3744},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gogianu21a/gogianu21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/gogianu21a.html},\n abstract = \t {Most of the recent deep reinforcement learning advances take an RL-centric perspective and focus on refinements of the training objective. 
We diverge from this view and show that we can recover the performance of these developments not by changing the objective, but by regularising the value-function estimator. Constraining the Lipschitz constant of a single layer using spectral normalisation is sufficient to elevate the performance of a Categorical-DQN agent to that of a more elaborate agent on the challenging Atari domain. We conduct ablation studies to disentangle the various effects normalisation has on the learning dynamics and show that it is sufficient to modulate the parameter updates to recover most of the performance of spectral normalisation. These findings hint towards the need to also focus on the neural component and its learning dynamics to tackle the peculiarities of Deep Reinforcement Learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/gogianu21a/gogianu21a.pdf", "supp": "", "pdf_size": 2352801, "gs_citation": 59, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1887962783436917172&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": ";;;;;", "aff_domain": ";;;;;", "email": ";;;;;", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/gogianu21a.html" }, { "title": "Spectral Smoothing Unveils Phase Transitions in Hierarchical Variational Autoencoders", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9363", "id": "9363", "proceeding": "http://proceedings.mlr.press/v139/pervez21a.html", "slides": "", "author_site": "Adeel Pervez, Efstratios Gavves", "author": "Adeel Pervez; Efstratios Gavves", "abstract": "Variational autoencoders with deep hierarchies of stochastic layers have been known to suffer from the problem of posterior collapse, where the top layers fall back to the prior and become independent of input. We suggest that the hierarchical VAE objective explicitly includes the variance of the function parameterizing the mean and variance of the latent Gaussian distribution which itself is often a high variance function. Building on this we generalize VAE neural networks by incorporating a smoothing parameter motivated by Gaussian analysis to reduce higher frequency components and consequently the variance in parameterizing functions and show that this can help to solve the problem of posterior collapse. We further show that under such smoothing the VAE loss exhibits a phase transition, where the top layer KL divergence sharply drops to zero at a critical value of the smoothing parameter that is similar for the same model across datasets. We validate the phenomenon across model configurations and datasets.", "bibtex": "@InProceedings{pmlr-v139-pervez21a,\n title = \t {Spectral Smoothing Unveils Phase Transitions in Hierarchical Variational Autoencoders},\n author = {Pervez, Adeel and Gavves, Efstratios},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8536--8545},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/pervez21a/pervez21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/pervez21a.html},\n abstract = \t {Variational autoencoders with deep hierarchies of stochastic layers have been known to suffer from the problem of posterior collapse, where the top layers fall back to the prior and become independent of input. 
We suggest that the hierarchical VAE objective explicitly includes the variance of the function parameterizing the mean and variance of the latent Gaussian distribution which itself is often a high variance function. Building on this we generalize VAE neural networks by incorporating a smoothing parameter motivated by Gaussian analysis to reduce higher frequency components and consequently the variance in parameterizing functions and show that this can help to solve the problem of posterior collapse. We further show that under such smoothing the VAE loss exhibits a phase transition, where the top layer KL divergence sharply drops to zero at a critical value of the smoothing parameter that is similar for the same model across datasets. We validate the phenomenon across model configurations and datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/pervez21a/pervez21a.pdf", "supp": "", "pdf_size": 3459672, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16803104284083185176&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "QUV A Lab, Informatics Institute, University of Amsterdam, The Netherlands; QUV A Lab, Informatics Institute, University of Amsterdam, The Netherlands", "aff_domain": "uva.nl; ", "email": "uva.nl; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/pervez21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Amsterdam", "aff_unique_dep": "Informatics Institute", "aff_unique_url": "https://www.uva.nl", "aff_unique_abbr": "UvA", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Netherlands" }, { "title": "Spectral vertex sparsifiers and pair-wise spanners over distributed graphs", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8983", "id": "8983", "proceeding": "http://proceedings.mlr.press/v139/zhu21c.html", "slides": "", "author_site": "Chunjiang Zhu, Qinqing Liu, Jinbo Bi", "author": "Chunjiang Zhu; Qinqing Liu; Jinbo Bi", "abstract": "Graph sparsification is a powerful tool to approximate an arbitrary graph and has been used in machine learning over graphs. As real-world networks are becoming very large and naturally distributed, distributed graph sparsification has drawn considerable attention. In this work, we design communication-efficient distributed algorithms for constructing spectral vertex sparsifiers, which closely preserve effective resistance distances on a subset of vertices of interest in the original graphs, under the well-established message passing communication model. We prove that the communication cost approximates the lower bound with only a small gap. We further provide algorithms for constructing pair-wise spanners which approximate the shortest distances between each pair of vertices in a target set, instead of all pairs, and incur communication costs that are much smaller than those of existing algorithms in the message passing model. 
Experiments are performed to validate the communication efficiency of the proposed algorithms under the guarantee that the constructed sparsifiers have a good approximation quality.", "bibtex": "@InProceedings{pmlr-v139-zhu21c,\n title = \t {Spectral vertex sparsifiers and pair-wise spanners over distributed graphs},\n author = {Zhu, Chunjiang and Liu, Qinqing and Bi, Jinbo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12890--12900},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhu21c/zhu21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhu21c.html},\n abstract = \t {Graph sparsification is a powerful tool to approximate an arbitrary graph and has been used in machine learning over graphs. As real-world networks are becoming very large and naturally distributed, distributed graph sparsification has drawn considerable attention. In this work, we design communication-efficient distributed algorithms for constructing spectral vertex sparsifiers, which closely preserve effective resistance distances on a subset of vertices of interest in the original graphs, under the well-established message passing communication model. We prove that the communication cost approximates the lower bound with only a small gap. We further provide algorithms for constructing pair-wise spanners which approximate the shortest distances between each pair of vertices in a target set, instead of all pairs, and incur communication costs that are much smaller than those of existing algorithms in the message passing model. Experiments are performed to validate the communication efficiency of the proposed algorithms under the guarantee that the constructed sparsifiers have a good approximation quality.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhu21c/zhu21c.pdf", "supp": "", "pdf_size": 1060600, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6105461280329751544&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science, University of North Carolina at Greensboro, Greensboro, NC, USA; Department of Computer Science and Engineering, University of Connecticut, Storrs, CT, USA; Department of Computer Science and Engineering, University of Connecticut, Storrs, CT, USA", "aff_domain": "uncg.edu; ;uconn.edu", "email": "uncg.edu; ;uconn.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zhu21c.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "University of North Carolina at Greensboro;University of Connecticut", "aff_unique_dep": "Department of Computer Science;Department of Computer Science and Engineering", "aff_unique_url": "https://www.uncg.edu;https://www.uconn.edu", "aff_unique_abbr": "UNCG;UConn", "aff_campus_unique_index": "0;1;1", "aff_campus_unique": "Greensboro;Storrs", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "SpreadsheetCoder: Formula Prediction from Semi-structured Context", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8713", "id": "8713", "proceeding": "http://proceedings.mlr.press/v139/chen21m.html", "slides": "", "author_site": "Xinyun Chen, Petros Maniatis, Rishabh Singh, Charles Sutton, Hanjun Dai, Max Lin, Denny Zhou", "author": "Xinyun Chen; 
Petros Maniatis; Rishabh Singh; Charles Sutton; Hanjun Dai; Max Lin; Denny Zhou", "abstract": "Spreadsheet formula prediction has been an important program synthesis problem with many real-world applications. Previous works typically utilize input-output examples as the specification for spreadsheet formula synthesis, where each input-output pair simulates a separate row in the spreadsheet. However, this formulation does not fully capture the rich context in real-world spreadsheets. First, spreadsheet data entries are organized as tables, thus rows and columns are not necessarily independent from each other. In addition, many spreadsheet tables include headers, which provide high-level descriptions of the cell data. However, previous synthesis approaches do not consider headers as part of the specification. In this work, we present the first approach for synthesizing spreadsheet formulas from tabular context, which includes both headers and semi-structured tabular data. In particular, we propose SpreadsheetCoder, a BERT-based model architecture to represent the tabular context in both row-based and column-based formats. We train our model on a large dataset of spreadsheets, and demonstrate that SpreadsheetCoder achieves top-1 prediction accuracy of 42.51%, which is a considerable improvement over baselines that do not employ rich tabular context. Compared to the rule-based system, SpreadsheetCoder assists 82% more users in composing formulas on Google Sheets.", "bibtex": "@InProceedings{pmlr-v139-chen21m,\n title = \t {SpreadsheetCoder: Formula Prediction from Semi-structured Context},\n author = {Chen, Xinyun and Maniatis, Petros and Singh, Rishabh and Sutton, Charles and Dai, Hanjun and Lin, Max and Zhou, Denny},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1661--1672},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21m/chen21m.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21m.html},\n abstract = \t {Spreadsheet formula prediction has been an important program synthesis problem with many real-world applications. Previous works typically utilize input-output examples as the specification for spreadsheet formula synthesis, where each input-output pair simulates a separate row in the spreadsheet. However, this formulation does not fully capture the rich context in real-world spreadsheets. First, spreadsheet data entries are organized as tables, thus rows and columns are not necessarily independent from each other. In addition, many spreadsheet tables include headers, which provide high-level descriptions of the cell data. However, previous synthesis approaches do not consider headers as part of the specification. In this work, we present the first approach for synthesizing spreadsheet formulas from tabular context, which includes both headers and semi-structured tabular data. In particular, we propose SpreadsheetCoder, a BERT-based model architecture to represent the tabular context in both row-based and column-based formats. We train our model on a large dataset of spreadsheets, and demonstrate that SpreadsheetCoder achieves top-1 prediction accuracy of 42.51%, which is a considerable improvement over baselines that do not employ rich tabular context. 
Compared to the rule-based system, SpreadsheetCoder assists 82% more users in composing formulas on Google Sheets.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21m/chen21m.pdf", "supp": "", "pdf_size": 1057692, "gs_citation": 49, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=422033345602932532&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "UC Berkeley; Google; Google; Google; Google; Google; Google", "aff_domain": "berkeley.edu; ; ; ; ; ; ", "email": "berkeley.edu; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/chen21m.html", "aff_unique_index": "0;1;1;1;1;1;1", "aff_unique_norm": "University of California, Berkeley;Google", "aff_unique_dep": ";Google", "aff_unique_url": "https://www.berkeley.edu;https://www.google.com", "aff_unique_abbr": "UC Berkeley;Google", "aff_campus_unique_index": "0;1;1;1;1;1;1", "aff_campus_unique": "Berkeley;Mountain View", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Stability and Convergence of Stochastic Gradient Clipping: Beyond Lipschitz Continuity and Smoothness", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10373", "id": "10373", "proceeding": "http://proceedings.mlr.press/v139/mai21a.html", "slides": "/media/icml-2021/Slides/10373.pdf", "author_site": "Vien Mai, Mikael Johansson", "author": "Vien V. Mai; Mikael Johansson", "abstract": "Stochastic gradient algorithms are often unstable when applied to functions that do not have Lipschitz-continuous and/or bounded gradients. Gradient clipping is a simple and effective technique to stabilize the training process for problems that are prone to the exploding gradient problem. Despite its widespread popularity, the convergence properties of the gradient clipping heuristic are poorly understood, especially for stochastic problems. This paper establishes both qualitative and quantitative convergence results of the clipped stochastic (sub)gradient method (SGD) for non-smooth convex functions with rapidly growing subgradients. Our analyses show that clipping enhances the stability of SGD and that the clipped SGD algorithm enjoys finite convergence rates in many cases. We also study the convergence of a clipped method with momentum, which includes clipped SGD as a special case, for weakly convex problems under standard assumptions. With a novel Lyapunov analysis, we show that the proposed method achieves the best-known rate for the considered class of problems, demonstrating the effectiveness of clipped methods also in this regime. Numerical results confirm our theoretical developments.", "bibtex": "@InProceedings{pmlr-v139-mai21a,\n title = \t {Stability and Convergence of Stochastic Gradient Clipping: Beyond Lipschitz Continuity and Smoothness},\n author = {Mai, Vien V. and Johansson, Mikael},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7325--7335},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/mai21a/mai21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/mai21a.html},\n abstract = \t {Stochastic gradient algorithms are often unstable when applied to functions that do not have Lipschitz-continuous and/or bounded gradients. 
Gradient clipping is a simple and effective technique to stabilize the training process for problems that are prone to the exploding gradient problem. Despite its widespread popularity, the convergence properties of the gradient clipping heuristic are poorly understood, especially for stochastic problems. This paper establishes both qualitative and quantitative convergence results of the clipped stochastic (sub)gradient method (SGD) for non-smooth convex functions with rapidly growing subgradients. Our analyses show that clipping enhances the stability of SGD and that the clipped SGD algorithm enjoys finite convergence rates in many cases. We also study the convergence of a clipped method with momentum, which includes clipped SGD as a special case, for weakly convex problems under standard assumptions. With a novel Lyapunov analysis, we show that the proposed method achieves the best-known rate for the considered class of problems, demonstrating the effectiveness of clipped methods also in this regime. Numerical results confirm our theoretical developments.}\n}", "pdf": "http://proceedings.mlr.press/v139/mai21a/mai21a.pdf", "supp": "", "pdf_size": 831581, "gs_citation": 73, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2864644052635423127&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Division of Decision and Control Systems, EECS, KTH Royal Institute of Technology, Stockholm, Sweden; Division of Decision and Control Systems, EECS, KTH Royal Institute of Technology, Stockholm, Sweden", "aff_domain": "kth.se;kth.se", "email": "kth.se;kth.se", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/mai21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "KTH Royal Institute of Technology", "aff_unique_dep": "Division of Decision and Control Systems", "aff_unique_url": "https://www.kth.se", "aff_unique_abbr": "KTH", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Stockholm", "aff_country_unique_index": "0;0", "aff_country_unique": "Sweden" }, { "title": "Stability and Generalization of Stochastic Gradient Methods for Minimax Problems", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10015", "id": "10015", "proceeding": "http://proceedings.mlr.press/v139/lei21b.html", "slides": "", "author_site": "Yunwen Lei, Zhenhuan Yang, Tianbao Yang, Yiming Ying", "author": "Yunwen Lei; Zhenhuan Yang; Tianbao Yang; Yiming Ying", "abstract": "Many machine learning problems can be formulated as minimax problems such as Generative Adversarial Networks (GANs), AUC maximization and robust estimation, to mention but a few. A substantial amount of studies are devoted to studying the convergence behavior of their stochastic gradient-type algorithms. In contrast, there is relatively little work on understanding their generalization, i.e., how the learning models built from training examples would behave on test examples. In this paper, we provide a comprehensive generalization analysis of stochastic gradient methods for minimax problems under both convex-concave and nonconvex-nonconcave cases through the lens of algorithmic stability. We establish a quantitative connection between stability and several generalization measures both in expectation and with high probability. For the convex-concave setting, our stability analysis shows that stochastic gradient descent ascent attains optimal generalization bounds for both smooth and nonsmooth minimax problems. 
We also establish generalization bounds for both weakly-convex-weakly-concave and gradient-dominated problems. We report preliminary experimental results to verify our theory.", "bibtex": "@InProceedings{pmlr-v139-lei21b,\n title = \t {Stability and Generalization of Stochastic Gradient Methods for Minimax Problems},\n author = {Lei, Yunwen and Yang, Zhenhuan and Yang, Tianbao and Ying, Yiming},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6175--6186},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lei21b/lei21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/lei21b.html},\n abstract = \t {Many machine learning problems can be formulated as minimax problems such as Generative Adversarial Networks (GANs), AUC maximization and robust estimation, to mention but a few. A substantial amount of studies are devoted to studying the convergence behavior of their stochastic gradient-type algorithms. In contrast, there is relatively little work on understanding their generalization, i.e., how the learning models built from training examples would behave on test examples. In this paper, we provide a comprehensive generalization analysis of stochastic gradient methods for minimax problems under both convex-concave and nonconvex-nonconcave cases through the lens of algorithmic stability. We establish a quantitative connection between stability and several generalization measures both in expectation and with high probability. For the convex-concave setting, our stability analysis shows that stochastic gradient descent ascent attains optimal generalization bounds for both smooth and nonsmooth minimax problems. We also establish generalization bounds for both weakly-convex-weakly-concave and gradient-dominated problems. 
We report preliminary experimental results to verify our theory.}\n}", "pdf": "http://proceedings.mlr.press/v139/lei21b/lei21b.pdf", "supp": "", "pdf_size": 925235, "gs_citation": 51, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5282146573067352151&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 15, "aff": "School of Computer Science, University of Birmingham, Birmingham B15 2TT, UK+Department of Mathematics and Statistics, State University of New York at Albany, USA; Department of Mathematics and Statistics, State University of New York at Albany, USA; Department of Computer Science, The University of Iowa, Iowa City, IA 52242, USA; Department of Mathematics and Statistics, State University of New York at Albany, USA", "aff_domain": "albany.edu; ; ;albany.edu", "email": "albany.edu; ; ;albany.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/lei21b.html", "aff_unique_index": "0+1;1;2;1", "aff_unique_norm": "University of Birmingham;State University of New York at Albany;University of Iowa", "aff_unique_dep": "School of Computer Science;Department of Mathematics and Statistics;Department of Computer Science", "aff_unique_url": "https://www.birmingham.ac.uk;https://www.albany.edu;https://www.uiowa.edu", "aff_unique_abbr": "UoB;SUNY Albany;UIowa", "aff_campus_unique_index": "0+1;1;2;1", "aff_campus_unique": "Birmingham;Albany;Iowa City", "aff_country_unique_index": "0+1;1;1;1", "aff_country_unique": "United Kingdom;United States" }, { "title": "Stabilizing Equilibrium Models by Jacobian Regularization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10635", "id": "10635", "proceeding": "http://proceedings.mlr.press/v139/bai21b.html", "slides": "", "author_site": "Shaojie Bai, Vladlen Koltun, Zico Kolter", "author": "Shaojie Bai; Vladlen Koltun; Zico Kolter", "abstract": "Deep equilibrium networks (DEQs) are a new class of models that eschews traditional depth in favor of finding the fixed point of a single non-linear layer. These models have been shown to achieve performance competitive with the state-of-the-art deep networks while using significantly less memory. Yet they are also slower, brittle to architectural choices, and introduce potential instability to the model. In this paper, we propose a regularization scheme for DEQ models that explicitly regularizes the Jacobian of the fixed-point update equations to stabilize the learning of equilibrium models. We show that this regularization adds only minimal computational cost, significantly stabilizes the fixed-point convergence in both forward and backward passes, and scales well to high-dimensional, realistic domains (e.g., WikiText-103 language modeling and ImageNet classification). Using this method, we demonstrate, for the first time, an implicit-depth model that runs with approximately the same speed and level of performance as popular conventional deep networks such as ResNet-101, while still maintaining the constant memory footprint and architectural simplicity of DEQs. 
Code is available https://github.com/locuslab/deq.", "bibtex": "@InProceedings{pmlr-v139-bai21b,\n title = \t {Stabilizing Equilibrium Models by Jacobian Regularization},\n author = {Bai, Shaojie and Koltun, Vladlen and Kolter, Zico},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {554--565},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bai21b/bai21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/bai21b.html},\n abstract = \t {Deep equilibrium networks (DEQs) are a new class of models that eschews traditional depth in favor of finding the fixed point of a single non-linear layer. These models have been shown to achieve performance competitive with the state-of-the-art deep networks while using significantly less memory. Yet they are also slower, brittle to architectural choices, and introduce potential instability to the model. In this paper, we propose a regularization scheme for DEQ models that explicitly regularizes the Jacobian of the fixed-point update equations to stabilize the learning of equilibrium models. We show that this regularization adds only minimal computational cost, significantly stabilizes the fixed-point convergence in both forward and backward passes, and scales well to high-dimensional, realistic domains (e.g., WikiText-103 language modeling and ImageNet classification). Using this method, we demonstrate, for the first time, an implicit-depth model that runs with approximately the same speed and level of performance as popular conventional deep networks such as ResNet-101, while still maintaining the constant memory footprint and architectural simplicity of DEQs. Code is available https://github.com/locuslab/deq.}\n}", "pdf": "http://proceedings.mlr.press/v139/bai21b/bai21b.pdf", "supp": "", "pdf_size": 7917937, "gs_citation": 74, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7648841566854588035&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Carnegie Mellon University; Intel Labs; Carnegie Mellon University", "aff_domain": "cs.cmu.edu; ; ", "email": "cs.cmu.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/bai21b.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Carnegie Mellon University;Intel", "aff_unique_dep": ";Intel Labs", "aff_unique_url": "https://www.cmu.edu;https://www.intel.com", "aff_unique_abbr": "CMU;Intel", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "State Entropy Maximization with Random Encoders for Efficient Exploration", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9857", "id": "9857", "proceeding": "http://proceedings.mlr.press/v139/seo21a.html", "slides": "/media/icml-2021/Slides/9857.pdf", "author_site": "Younggyo Seo, Lili Chen, Jinwoo Shin, Honglak Lee, Pieter Abbeel, Kimin Lee", "author": "Younggyo Seo; Lili Chen; Jinwoo Shin; Honglak Lee; Pieter Abbeel; Kimin Lee", "abstract": "Recent exploration methods have proven to be a recipe for improving sample-efficiency in deep reinforcement learning (RL). However, efficient exploration in high-dimensional observation spaces still remains a challenge. 
This paper presents Random Encoders for Efficient Exploration (RE3), an exploration method that utilizes state entropy as an intrinsic reward. In order to estimate state entropy in environments with high-dimensional observations, we utilize a k-nearest neighbor entropy estimator in the low-dimensional representation space of a convolutional encoder. In particular, we find that the state entropy can be estimated in a stable and compute-efficient manner by utilizing a randomly initialized encoder, which is fixed throughout training. Our experiments show that RE3 significantly improves the sample-efficiency of both model-free and model-based RL methods on locomotion and navigation tasks from DeepMind Control Suite and MiniGrid benchmarks. We also show that RE3 allows learning diverse behaviors without extrinsic rewards, effectively improving sample-efficiency in downstream tasks.", "bibtex": "@InProceedings{pmlr-v139-seo21a,\n title = \t {State Entropy Maximization with Random Encoders for Efficient Exploration},\n author = {Seo, Younggyo and Chen, Lili and Shin, Jinwoo and Lee, Honglak and Abbeel, Pieter and Lee, Kimin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9443--9454},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/seo21a/seo21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/seo21a.html},\n abstract = \t {Recent exploration methods have proven to be a recipe for improving sample-efficiency in deep reinforcement learning (RL). However, efficient exploration in high-dimensional observation spaces still remains a challenge. This paper presents Random Encoders for Efficient Exploration (RE3), an exploration method that utilizes state entropy as an intrinsic reward. In order to estimate state entropy in environments with high-dimensional observations, we utilize a k-nearest neighbor entropy estimator in the low-dimensional representation space of a convolutional encoder. In particular, we find that the state entropy can be estimated in a stable and compute-efficient manner by utilizing a randomly initialized encoder, which is fixed throughout training. Our experiments show that RE3 significantly improves the sample-efficiency of both model-free and model-based RL methods on locomotion and navigation tasks from DeepMind Control Suite and MiniGrid benchmarks. 
We also show that RE3 allows learning diverse behaviors without extrinsic rewards, effectively improving sample-efficiency in downstream tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/seo21a/seo21a.pdf", "supp": "", "pdf_size": 5999660, "gs_citation": 177, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14430212211831695108&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "KAIST; UC Berkeley; KAIST; University of Michigan+LG AI Research; UC Berkeley; UC Berkeley", "aff_domain": "berkeley.edu; ; ; ; ;berkeley.edu", "email": "berkeley.edu; ; ; ; ;berkeley.edu", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/seo21a.html", "aff_unique_index": "0;1;0;2+3;1;1", "aff_unique_norm": "Korea Advanced Institute of Science and Technology;University of California, Berkeley;University of Michigan;LG", "aff_unique_dep": ";;;LG AI Research", "aff_unique_url": "https://www.kaist.ac.kr;https://www.berkeley.edu;https://www.umich.edu;https://www.lgaires.com", "aff_unique_abbr": "KAIST;UC Berkeley;UM;LG AI", "aff_campus_unique_index": "1;;1;1", "aff_campus_unique": ";Berkeley", "aff_country_unique_index": "0;1;0;1+0;1;1", "aff_country_unique": "South Korea;United States" }, { "title": "State Relevance for Off-Policy Evaluation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9393", "id": "9393", "proceeding": "http://proceedings.mlr.press/v139/shen21d.html", "slides": "", "author_site": "Simon Shen, Jason Yecheng Ma, Omer Gottesman, Finale Doshi-Velez", "author": "Simon P Shen; Yecheng Ma; Omer Gottesman; Finale Doshi-Velez", "abstract": "Importance sampling-based estimators for off-policy evaluation (OPE) are valued for their simplicity, unbiasedness, and reliance on relatively few assumptions. However, the variance of these estimators is often high, especially when trajectories are of different lengths. In this work, we introduce Omitting-States-Irrelevant-to-Return Importance Sampling (OSIRIS), an estimator which reduces variance by strategically omitting likelihood ratios associated with certain states. We formalize the conditions under which OSIRIS is unbiased and has lower variance than ordinary importance sampling, and we demonstrate these properties empirically.", "bibtex": "@InProceedings{pmlr-v139-shen21d,\n title = \t {State Relevance for Off-Policy Evaluation},\n author = {Shen, Simon P and Ma, Yecheng and Gottesman, Omer and Doshi-Velez, Finale},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9537--9546},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/shen21d/shen21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/shen21d.html},\n abstract = \t {Importance sampling-based estimators for off-policy evaluation (OPE) are valued for their simplicity, unbiasedness, and reliance on relatively few assumptions. However, the variance of these estimators is often high, especially when trajectories are of different lengths. In this work, we introduce Omitting-States-Irrelevant-to-Return Importance Sampling (OSIRIS), an estimator which reduces variance by strategically omitting likelihood ratios associated with certain states. 
We formalize the conditions under which OSIRIS is unbiased and has lower variance than ordinary importance sampling, and we demonstrate these properties empirically.}\n}", "pdf": "http://proceedings.mlr.press/v139/shen21d/shen21d.pdf", "supp": "", "pdf_size": 6390547, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1184988858503207705&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Harvard University, Cambridge, MA; University of Pennsylvania, Philadelphia, PA; Brown University, Providence, RI; Harvard University, Cambridge, MA", "aff_domain": "fas.harvard.edu; ; ;seas.harvard.edu", "email": "fas.harvard.edu; ; ;seas.harvard.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/shen21d.html", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "Harvard University;University of Pennsylvania;Brown University", "aff_unique_dep": ";;", "aff_unique_url": "https://www.harvard.edu;https://www.upenn.edu;https://www.brown.edu", "aff_unique_abbr": "Harvard;UPenn;Brown", "aff_campus_unique_index": "0;1;2;0", "aff_campus_unique": "Cambridge;Philadelphia;Providence", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Statistical Estimation from Dependent Data", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10333", "id": "10333", "proceeding": "http://proceedings.mlr.press/v139/kandiros21a.html", "slides": "/media/icml-2021/Slides/10333_qJuiZyl.pdf", "author_site": "Vardis Kandiros, Yuval Dagan, Nishanth Dikkala, Surbhi Goel, Constantinos Daskalakis", "author": "Vardis Kandiros; Yuval Dagan; Nishanth Dikkala; Surbhi Goel; Constantinos Daskalakis", "abstract": "We consider a general statistical estimation problem wherein binary labels across different observations are not independent conditioning on their feature vectors, but dependent, capturing settings where e.g. these observations are collected on a spatial domain, a temporal domain, or a social network, which induce dependencies. We model these dependencies in the language of Markov Random Fields and, importantly, allow these dependencies to be substantial, i.e. do not assume that the Markov Random Field capturing these dependencies is in high temperature. As our main contribution we provide algorithms and statistically efficient estimation rates for this model, giving several instantiations of our bounds in logistic regression, sparse logistic regression, and neural network regression settings with dependent data. Our estimation guarantees follow from novel results for estimating the parameters (i.e. 
external fields and interaction strengths) of Ising models from a single sample.", "bibtex": "@InProceedings{pmlr-v139-kandiros21a,\n title = \t {Statistical Estimation from Dependent Data},\n author = {Kandiros, Vardis and Dagan, Yuval and Dikkala, Nishanth and Goel, Surbhi and Daskalakis, Constantinos},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5269--5278},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kandiros21a/kandiros21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kandiros21a.html},\n abstract = \t {We consider a general statistical estimation problem wherein binary labels across different observations are not independent conditioning on their feature vectors, but dependent, capturing settings where e.g. these observations are collected on a spatial domain, a temporal domain, or a social network, which induce dependencies. We model these dependencies in the language of Markov Random Fields and, importantly, allow these dependencies to be substantial, i.e. do not assume that the Markov Random Field capturing these dependencies is in high temperature. As our main contribution we provide algorithms and statistically efficient estimation rates for this model, giving several instantiations of our bounds in logistic regression, sparse logistic regression, and neural network regression settings with dependent data. Our estimation guarantees follow from novel results for estimating the parameters (i.e. external fields and interaction strengths) of Ising models from a single sample.}\n}", "pdf": "http://proceedings.mlr.press/v139/kandiros21a/kandiros21a.pdf", "supp": "", "pdf_size": 446865, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10418781720711784766&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 2, "aff": "MIT EECS; MIT EECS; Google Research; Microsoft Research NYC; MIT EECS", "aff_domain": "mit.edu;mit.edu;google.com;microsoft.com;mit.edu", "email": "mit.edu;mit.edu;google.com;microsoft.com;mit.edu", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/kandiros21a.html", "aff_unique_index": "0;0;1;2;0", "aff_unique_norm": "Massachusetts Institute of Technology;Google;Microsoft", "aff_unique_dep": "Electrical Engineering & Computer Science;Google Research;Microsoft Research", "aff_unique_url": "https://web.mit.edu;https://research.google;https://www.microsoft.com/en-us/research/group/microsoft-research-new-york-city", "aff_unique_abbr": "MIT;Google Research;MSR NYC", "aff_campus_unique_index": "0;0;1;2;0", "aff_campus_unique": "Cambridge;Mountain View;New York City", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Stochastic Iterative Graph Matching", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9913", "id": "9913", "proceeding": "http://proceedings.mlr.press/v139/liu21i.html", "slides": "", "author_site": "Linfeng Liu, Michael Hughes, Soha Hassoun, Liping Liu", "author": "Linfeng Liu; Michael C Hughes; Soha Hassoun; Liping Liu", "abstract": "Recent works apply Graph Neural Networks (GNNs) to graph matching tasks and show promising results. 
Considering that model outputs are complex matchings, we devise several techniques to improve the learning of GNNs and obtain a new model, Stochastic Iterative Graph MAtching (SIGMA). Our model predicts a distribution of matchings, instead of a single matching, for a graph pair so the model can explore several probable matchings. We further introduce a novel multi-step matching procedure, which learns how to refine a graph pair\u2019s matching results incrementally. The model also includes dummy nodes so that the model does not have to find matchings for nodes without correspondence. We fit this model to data via scalable stochastic optimization. We conduct extensive experiments across synthetic graph datasets as well as biochemistry and computer vision applications. Across all tasks, our results show that SIGMA can produce significantly improved graph matching results compared to state-of-the-art models. Ablation studies verify that each of our components (stochastic training, iterative matching, and dummy nodes) offers noticeable improvement.", "bibtex": "@InProceedings{pmlr-v139-liu21i,\n title = \t {Stochastic Iterative Graph Matching},\n author = {Liu, Linfeng and Hughes, Michael C and Hassoun, Soha and Liu, Liping},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6815--6825},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21i/liu21i.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21i.html},\n abstract = \t {Recent works apply Graph Neural Networks (GNNs) to graph matching tasks and show promising results. Considering that model outputs are complex matchings, we devise several techniques to improve the learning of GNNs and obtain a new model, Stochastic Iterative Graph MAtching (SIGMA). Our model predicts a distribution of matchings, instead of a single matching, for a graph pair so the model can explore several probable matchings. We further introduce a novel multi-step matching procedure, which learns how to refine a graph pair\u2019s matching results incrementally. The model also includes dummy nodes so that the model does not have to find matchings for nodes without correspondence. We fit this model to data via scalable stochastic optimization. We conduct extensive experiments across synthetic graph datasets as well as biochemistry and computer vision applications. Across all tasks, our results show that SIGMA can produce significantly improved graph matching results compared to state-of-the-art models. 
Ablation studies verify that each of our components (stochastic training, iterative matching, and dummy nodes) offers noticeable improvement.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21i/liu21i.pdf", "supp": "", "pdf_size": 4104850, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12476907086430582524&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science, Tufts University, MA, USA+Department of Chemical and Biological Engineering, Tufts University, MA, USA; Department of Computer Science, Tufts University, MA, USA; Department of Computer Science, Tufts University, MA, USA+Department of Chemical and Biological Engineering, Tufts University, MA, USA; Department of Computer Science, Tufts University, MA, USA", "aff_domain": "tufts.edu; ; ; ", "email": "tufts.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/liu21i.html", "aff_unique_index": "0+0;0;0+0;0", "aff_unique_norm": "Tufts University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.tufts.edu", "aff_unique_abbr": "Tufts", "aff_campus_unique_index": "0+0;0;0+0;0", "aff_campus_unique": "Medford", "aff_country_unique_index": "0+0;0;0+0;0", "aff_country_unique": "United States" }, { "title": "Stochastic Multi-Armed Bandits with Unrestricted Delay Distributions", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10697", "id": "10697", "proceeding": "http://proceedings.mlr.press/v139/lancewicki21a.html", "slides": "", "author_site": "Tal Lancewicki, Shahar Segal, Tomer Koren, Yishay Mansour", "author": "Tal Lancewicki; Shahar Segal; Tomer Koren; Yishay Mansour", "abstract": "We study the stochastic Multi-Armed Bandit\u00a0(MAB) problem with random delays in the feedback received by the algorithm. We consider two settings: the {\\it reward dependent} delay setting, where realized delays may depend on the stochastic rewards, and the {\\it reward-independent} delay setting. Our main contribution is algorithms that achieve near-optimal regret in each of the settings, with an additional additive dependence on the quantiles of the delay distribution. Our results do not make any assumptions on the delay distributions: in particular, we do not assume they come from any parametric family of distributions and allow for unbounded support and expectation; we further allow for the case of infinite delays where the algorithm might occasionally not observe any feedback.", "bibtex": "@InProceedings{pmlr-v139-lancewicki21a,\n title = \t {Stochastic Multi-Armed Bandits with Unrestricted Delay Distributions},\n author = {Lancewicki, Tal and Segal, Shahar and Koren, Tomer and Mansour, Yishay},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5969--5978},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lancewicki21a/lancewicki21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lancewicki21a.html},\n abstract = \t {We study the stochastic Multi-Armed Bandit\u00a0(MAB) problem with random delays in the feedback received by the algorithm. We consider two settings: the {\\it reward dependent} delay setting, where realized delays may depend on the stochastic rewards, and the {\\it reward-independent} delay setting. 
Our main contribution is algorithms that achieve near-optimal regret in each of the settings, with an additional additive dependence on the quantiles of the delay distribution. Our results do not make any assumptions on the delay distributions: in particular, we do not assume they come from any parametric family of distributions and allow for unbounded support and expectation; we further allow for the case of infinite delays where the algorithm might occasionally not observe any feedback.}\n}", "pdf": "http://proceedings.mlr.press/v139/lancewicki21a/lancewicki21a.pdf", "supp": "", "pdf_size": 412403, "gs_citation": 61, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3964720789745360665&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Blavatnik School of Computer Science, Tel Aviv University, Israel+Google Research, Tel Aviv; Blavatnik School of Computer Science, Tel Aviv University, Israel+Google Research, Tel Aviv; Google Research, Tel Aviv; Blavatnik School of Computer Science, Tel Aviv University, Israel+Google Research, Tel Aviv", "aff_domain": "mail.tau.ac.il;mail.tau.ac.il; ; ", "email": "mail.tau.ac.il;mail.tau.ac.il; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/lancewicki21a.html", "aff_unique_index": "0+1;0+1;1;0+1", "aff_unique_norm": "Tel Aviv University;Google", "aff_unique_dep": "Blavatnik School of Computer Science;Google Research", "aff_unique_url": "https://www.tau.ac.il;https://research.google", "aff_unique_abbr": "TAU;Google", "aff_campus_unique_index": "0+0;0+0;0;0+0", "aff_campus_unique": "Tel Aviv", "aff_country_unique_index": "0+0;0+0;0;0+0", "aff_country_unique": "Israel" }, { "title": "Stochastic Sign Descent Methods: New Algorithms and Better Theory", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9485", "id": "9485", "proceeding": "http://proceedings.mlr.press/v139/safaryan21a.html", "slides": "", "author_site": "Mher Safaryan, Peter Richtarik", "author": "Mher Safaryan; Peter Richtarik", "abstract": "Various gradient compression schemes have been proposed to mitigate the communication cost in distributed training of large scale machine learning models. Sign-based methods, such as signSGD (Bernstein et al., 2018), have recently been gaining popularity because of their simple compression rule and connection to adaptive gradient methods, like ADAM. In this paper, we analyze sign-based methods for non-convex optimization in three key settings: (i) standard single node, (ii) parallel with shared data and (iii) distributed with partitioned data. For single machine case, we generalize the previous analysis of signSGD relying on intuitive bounds on success probabilities and allowing even biased estimators. Furthermore, we extend the analysis to parallel setting within a parameter server framework, where exponentially fast noise reduction is guaranteed with respect to number of nodes, maintaining $1$-bit compression in both directions and using small mini-batch sizes. Next, we identify a fundamental issue with signSGD to converge in distributed environment. To resolve this issue, we propose a new sign-based method, {\\em Stochastic Sign Descent with Momentum (SSDM)}, which converges under standard bounded variance assumption with the optimal asymptotic rate. 
We validate several aspects of our theoretical findings with numerical experiments.", "bibtex": "@InProceedings{pmlr-v139-safaryan21a,\n title = \t {Stochastic Sign Descent Methods: New Algorithms and Better Theory},\n author = {Safaryan, Mher and Richtarik, Peter},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9224--9234},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/safaryan21a/safaryan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/safaryan21a.html},\n abstract = \t {Various gradient compression schemes have been proposed to mitigate the communication cost in distributed training of large scale machine learning models. Sign-based methods, such as signSGD (Bernstein et al., 2018), have recently been gaining popularity because of their simple compression rule and connection to adaptive gradient methods, like ADAM. In this paper, we analyze sign-based methods for non-convex optimization in three key settings: (i) standard single node, (ii) parallel with shared data and (iii) distributed with partitioned data. For single machine case, we generalize the previous analysis of signSGD relying on intuitive bounds on success probabilities and allowing even biased estimators. Furthermore, we extend the analysis to parallel setting within a parameter server framework, where exponentially fast noise reduction is guaranteed with respect to number of nodes, maintaining $1$-bit compression in both directions and using small mini-batch sizes. Next, we identify a fundamental issue with signSGD to converge in distributed environment. To resolve this issue, we propose a new sign-based method, {\\em Stochastic Sign Descent with Momentum (SSDM)}, which converges under standard bounded variance assumption with the optimal asymptotic rate. We validate several aspects of our theoretical findings with numerical experiments.}\n}", "pdf": "http://proceedings.mlr.press/v139/safaryan21a/safaryan21a.pdf", "supp": "", "pdf_size": 1773980, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18114309112532872426&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "KAUST, Saudi Arabia; MIPT, Russia", "aff_domain": "gmail.com; ", "email": "gmail.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/safaryan21a.html", "aff_unique_index": "0;1", "aff_unique_norm": "King Abdullah University of Science and Technology;Moscow Institute of Physics and Technology", "aff_unique_dep": ";", "aff_unique_url": "https://www.kaust.edu.sa;https://www.mipt.ru/en", "aff_unique_abbr": "KAUST;MIPT", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1", "aff_country_unique": "Saudi Arabia;Russian Federation" }, { "title": "Straight to the Gradient: Learning to Use Novel Tokens for Neural Text Generation", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8965", "id": "8965", "proceeding": "http://proceedings.mlr.press/v139/lin21b.html", "slides": "", "author_site": "Xiang Lin, Simeng Han, Shafiq Joty", "author": "Xiang Lin; Simeng Han; Shafiq Joty", "abstract": "Advanced large-scale neural language models have led to significant success in many language generation tasks. 
However, the most commonly used training objective, Maximum Likelihood Estimation (MLE), has been shown problematic, where the trained model prefers using dull and repetitive phrases. In this work, we introduce ScaleGrad, a modification straight to the gradient of the loss function, to remedy the degeneration issue of the standard MLE objective. By directly maneuvering the gradient information, ScaleGrad makes the model learn to use novel tokens. Empirical results show the effectiveness of our method not only in open-ended generation, but also in directed generation tasks. With the simplicity in architecture, our method can serve as a general training objective that is applicable to most of the neural text generation tasks.", "bibtex": "@InProceedings{pmlr-v139-lin21b,\n title = \t {Straight to the Gradient: Learning to Use Novel Tokens for Neural Text Generation},\n author = {Lin, Xiang and Han, Simeng and Joty, Shafiq},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6642--6653},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lin21b/lin21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/lin21b.html},\n abstract = \t {Advanced large-scale neural language models have led to significant success in many language generation tasks. However, the most commonly used training objective, Maximum Likelihood Estimation (MLE), has been shown problematic, where the trained model prefers using dull and repetitive phrases. In this work, we introduce ScaleGrad, a modification straight to the gradient of the loss function, to remedy the degeneration issue of the standard MLE objective. By directly maneuvering the gradient information, ScaleGrad makes the model learn to use novel tokens. Empirical results show the effectiveness of our method not only in open-ended generation, but also in directed generation tasks. 
With the simplicity in architecture, our method can serve as a general training objective that is applicable to most of the neural text generation tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/lin21b/lin21b.pdf", "supp": "", "pdf_size": 2675132, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=743520526432802506&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Nanyang Technological University, Singapore; Nanyang Technological University, Singapore; Nanyang Technological University, Singapore + Salesforce Research Asia, Singapore", "aff_domain": "e.ntu.edu.sg; ; ", "email": "e.ntu.edu.sg; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/lin21b.html", "aff_unique_index": "0;0;0+1", "aff_unique_norm": "Nanyang Technological University;Salesforce Research Asia", "aff_unique_dep": ";", "aff_unique_url": "https://www.ntu.edu.sg;https://research.salesforce.com", "aff_unique_abbr": "NTU;SRA", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0+0", "aff_country_unique": "Singapore" }, { "title": "Strategic Classification Made Practical", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8861", "id": "8861", "proceeding": "http://proceedings.mlr.press/v139/levanon21a.html", "slides": "", "author_site": "Sagi Levanon, Nir Rosenfeld", "author": "Sagi Levanon; Nir Rosenfeld", "abstract": "Strategic classification regards the problem of learning in settings where users can strategically modify their features to improve outcomes. This setting applies broadly, and has received much recent attention. But despite its practical significance, work in this space has so far been predominantly theoretical. In this paper we present a learning framework for strategic classification that is practical. Our approach directly minimizes the \u201cstrategic\u201d empirical risk, which we achieve by differentiating through the strategic response of users. This provides flexibility that allows us to extend beyond the original problem formulation and towards more realistic learning scenarios. A series of experiments demonstrates the effectiveness of our approach on various learning settings.", "bibtex": "@InProceedings{pmlr-v139-levanon21a,\n title = \t {Strategic Classification Made Practical},\n author = {Levanon, Sagi and Rosenfeld, Nir},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6243--6253},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/levanon21a/levanon21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/levanon21a.html},\n abstract = \t {Strategic classification regards the problem of learning in settings where users can strategically modify their features to improve outcomes. This setting applies broadly, and has received much recent attention. But despite its practical significance, work in this space has so far been predominantly theoretical. In this paper we present a learning framework for strategic classification that is practical. Our approach directly minimizes the \u201cstrategic\u201d empirical risk, which we achieve by differentiating through the strategic response of users. 
This provides flexibility that allows us to extend beyond the original problem formulation and towards more realistic learning scenarios. A series of experiments demonstrates the effectiveness of our approach on various learning settings.}\n}", "pdf": "http://proceedings.mlr.press/v139/levanon21a/levanon21a.pdf", "supp": "", "pdf_size": 831823, "gs_citation": 66, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6308861918899589533&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science, Technion - Israel Institute of Technology; Department of Computer Science, Technion - Israel Institute of Technology", "aff_domain": "cs.technion.ac.il;cs.technion.ac.il", "email": "cs.technion.ac.il;cs.technion.ac.il", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/levanon21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Technion - Israel Institute of Technology", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.technion.ac.il", "aff_unique_abbr": "Technion", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "title": "Strategic Classification in the Dark", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9097", "id": "9097", "proceeding": "http://proceedings.mlr.press/v139/ghalme21a.html", "slides": "/media/icml-2021/Slides/9097.pdf", "author_site": "Ganesh Ghalme, Vineet Nair, Itay Eilat, Inbal Talgam-Cohen, Nir Rosenfeld", "author": "Ganesh Ghalme; Vineet Nair; Itay Eilat; Inbal Talgam-Cohen; Nir Rosenfeld", "abstract": "Strategic classification studies the interaction between a classification rule and the strategic agents it governs. Agents respond by manipulating their features, under the assumption that the classifier is known. However, in many real-life scenarios of high-stake classification (e.g., credit scoring), the classifier is not revealed to the agents, which leads agents to attempt to learn the classifier and game it too. In this paper we generalize the strategic classification model to such scenarios and analyze the effect of an unknown classifier. We define the \u201dprice of opacity\u201d as the difference between the prediction error under the opaque and transparent policies, characterize it, and give a sufficient condition for it to be strictly positive, in which case transparency is the recommended policy. Our experiments show how Hardt et al.\u2019s robust classifier is affected by keeping agents in the dark.", "bibtex": "@InProceedings{pmlr-v139-ghalme21a,\n title = \t {Strategic Classification in the Dark},\n author = {Ghalme, Ganesh and Nair, Vineet and Eilat, Itay and Talgam-Cohen, Inbal and Rosenfeld, Nir},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3672--3681},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ghalme21a/ghalme21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ghalme21a.html},\n abstract = \t {Strategic classification studies the interaction between a classification rule and the strategic agents it governs. Agents respond by manipulating their features, under the assumption that the classifier is known. 
However, in many real-life scenarios of high-stake classification (e.g., credit scoring), the classifier is not revealed to the agents, which leads agents to attempt to learn the classifier and game it too. In this paper we generalize the strategic classification model to such scenarios and analyze the effect of an unknown classifier. We define the \u201dprice of opacity\u201d as the difference between the prediction error under the opaque and transparent policies, characterize it, and give a sufficient condition for it to be strictly positive, in which case transparency is the recommended policy. Our experiments show how Hardt et al.\u2019s robust classifier is affected by keeping agents in the dark.}\n}", "pdf": "http://proceedings.mlr.press/v139/ghalme21a/ghalme21a.pdf", "supp": "", "pdf_size": 1404850, "gs_citation": 74, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15886223975765131668&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Technion - Israel Institute of Technology; Technion - Israel Institute of Technology; Technion - Israel Institute of Technology; Technion - Israel Institute of Technology; Technion - Israel Institute of Technology", "aff_domain": "campus.technion.ac.il;cs.technion.ac.il; ; ; ", "email": "campus.technion.ac.il;cs.technion.ac.il; ; ; ", "github": "", "project": "https://openschufa.de/english/", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/ghalme21a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Technion - Israel Institute of Technology", "aff_unique_dep": "", "aff_unique_url": "https://www.technion.ac.il/en/", "aff_unique_abbr": "Technion", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "Israel" }, { "title": "Streaming Bayesian Deep Tensor Factorization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10483", "id": "10483", "proceeding": "http://proceedings.mlr.press/v139/fang21d.html", "slides": "", "author_site": "Shikai Fang, Zheng Wang, Zhimeng Pan, Ji Liu, Shandian Zhe", "author": "Shikai Fang; Zheng Wang; Zhimeng Pan; Ji Liu; Shandian Zhe", "abstract": "Despite the success of existing tensor factorization methods, most of them conduct a multilinear decomposition, and rarely exploit powerful modeling frameworks, like deep neural networks, to capture a variety of complicated interactions in data. More important, for highly expressive, deep factorization, we lack an effective approach to handle streaming data, which are ubiquitous in real-world applications. To address these issues, we propose SBTD, a Streaming Bayesian Deep Tensor factorization method. We first use Bayesian neural networks (NNs) to build a deep tensor factorization model. We assign a spike-and-slab prior over each NN weight to encourage sparsity and to prevent overfitting. We then use multivariate Delta\u2019s method and moment matching to approximate the posterior of the NN output and calculate the running model evidence, based on which we develop an efficient streaming posterior inference algorithm in the assumed-density-filtering and expectation propagation framework. Our algorithm provides responsive incremental updates for the posterior of the latent factors and NN weights upon receiving newly observed tensor entries, and meanwhile identify and inhibit redundant/useless weights. 
We show the advantages of our approach in four real-world applications.", "bibtex": "@InProceedings{pmlr-v139-fang21d,\n title = \t {Streaming Bayesian Deep Tensor Factorization},\n author = {Fang, Shikai and Wang, Zheng and Pan, Zhimeng and Liu, Ji and Zhe, Shandian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3133--3142},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fang21d/fang21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/fang21d.html},\n abstract = \t {Despite the success of existing tensor factorization methods, most of them conduct a multilinear decomposition, and rarely exploit powerful modeling frameworks, like deep neural networks, to capture a variety of complicated interactions in data. More important, for highly expressive, deep factorization, we lack an effective approach to handle streaming data, which are ubiquitous in real-world applications. To address these issues, we propose SBTD, a Streaming Bayesian Deep Tensor factorization method. We first use Bayesian neural networks (NNs) to build a deep tensor factorization model. We assign a spike-and-slab prior over each NN weight to encourage sparsity and to prevent overfitting. We then use multivariate Delta\u2019s method and moment matching to approximate the posterior of the NN output and calculate the running model evidence, based on which we develop an efficient streaming posterior inference algorithm in the assumed-density-filtering and expectation propagation framework. Our algorithm provides responsive incremental updates for the posterior of the latent factors and NN weights upon receiving newly observed tensor entries, and meanwhile identify and inhibit redundant/useless weights. 
We show the advantages of our approach in four real-world applications.}\n}", "pdf": "http://proceedings.mlr.press/v139/fang21d/fang21d.pdf", "supp": "", "pdf_size": 984162, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3348197374019655215&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "University of Utah; Kwai Inc; University of Rochester; University of Utah; University of Utah", "aff_domain": "cs.utah.edu; ; ; ;cs.utah.edu", "email": "cs.utah.edu; ; ; ;cs.utah.edu", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/fang21d.html", "aff_unique_index": "0;1;2;0;0", "aff_unique_norm": "University of Utah;Kwai Inc;University of Rochester", "aff_unique_dep": ";;", "aff_unique_url": "https://www.utah.edu;https://www.kwai.com;https://www.rochester.edu", "aff_unique_abbr": "Utah;;U of R", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0;0;0", "aff_country_unique": "United States;China" }, { "title": "Streaming and Distributed Algorithms for Robust Column Subset Selection", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9919", "id": "9919", "proceeding": "http://proceedings.mlr.press/v139/jiang21e.html", "slides": "", "author_site": "Shuli Jiang, Dongyu Li, Irene Mengze Li, Arvind Mahankali, David Woodruff", "author": "Shuli Jiang; Dennis Li; Irene Mengze Li; Arvind V Mahankali; David Woodruff", "abstract": "We give the first single-pass streaming algorithm for Column Subset Selection with respect to the entrywise $\\ell_p$-norm with $1 \\leq p < 2$. We study the $\\ell_p$ norm loss since it is often considered more robust to noise than the standard Frobenius norm. Given an input matrix $A \\in \\mathbb{R}^{d \\times n}$ ($n \\gg d$), our algorithm achieves a multiplicative $k^{\\frac{1}{p} - \\frac{1}{2}}\\poly(\\log nd)$-approximation to the error with respect to the \\textit{best possible column subset} of size $k$. Furthermore, the space complexity of the streaming algorithm is optimal up to a logarithmic factor. Our streaming algorithm also extends naturally to a 1-round distributed protocol with nearly optimal communication cost. A key ingredient in our algorithms is a reduction to column subset selection in the $\\ell_{p,2}$-norm, which corresponds to the $p$-norm of the vector of Euclidean norms of each of the columns of $A$. This enables us to leverage strong coreset constructions for the Euclidean norm, which previously had not been applied in this context. We also give the first provable guarantees for greedy column subset selection in the $\\ell_{1, 2}$ norm, which can be used as an alternative, practical subroutine in our algorithms. 
Finally, we show that our algorithms give significant practical advantages on real-world data analysis tasks.", "bibtex": "@InProceedings{pmlr-v139-jiang21e,\n title = \t {Streaming and Distributed Algorithms for Robust Column Subset Selection},\n author = {Jiang, Shuli and Li, Dennis and Li, Irene Mengze and Mahankali, Arvind V and Woodruff, David},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4971--4981},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jiang21e/jiang21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/jiang21e.html},\n abstract = \t {We give the first single-pass streaming algorithm for Column Subset Selection with respect to the entrywise $\\ell_p$-norm with $1 \\leq p < 2$. We study the $\\ell_p$ norm loss since it is often considered more robust to noise than the standard Frobenius norm. Given an input matrix $A \\in \\mathbb{R}^{d \\times n}$ ($n \\gg d$), our algorithm achieves a multiplicative $k^{\\frac{1}{p} - \\frac{1}{2}}\\poly(\\log nd)$-approximation to the error with respect to the \\textit{best possible column subset} of size $k$. Furthermore, the space complexity of the streaming algorithm is optimal up to a logarithmic factor. Our streaming algorithm also extends naturally to a 1-round distributed protocol with nearly optimal communication cost. A key ingredient in our algorithms is a reduction to column subset selection in the $\\ell_{p,2}$-norm, which corresponds to the $p$-norm of the vector of Euclidean norms of each of the columns of $A$. This enables us to leverage strong coreset constructions for the Euclidean norm, which previously had not been applied in this context. We also give the first provable guarantees for greedy column subset selection in the $\\ell_{1, 2}$ norm, which can be used as an alternative, practical subroutine in our algorithms. 
Finally, we show that our algorithms give significant practical advantages on real-world data analysis tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/jiang21e/jiang21e.pdf", "supp": "", "pdf_size": 680513, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14557967983043893613&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "School of Computer Science, Carnegie Mellon University, Pittsburgh, Pennsylvania, USA; School of Computer Science, Carnegie Mellon University, Pittsburgh, Pennsylvania, USA; School of Computer Science, Carnegie Mellon University, Pittsburgh, Pennsylvania, USA; School of Computer Science, Carnegie Mellon University, Pittsburgh, Pennsylvania, USA; School of Computer Science, Carnegie Mellon University, Pittsburgh, Pennsylvania, USA", "aff_domain": "andrew.cmu.edu;andrew.cmu.edu; ;andrew.cmu.edu; ", "email": "andrew.cmu.edu;andrew.cmu.edu; ;andrew.cmu.edu; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/jiang21e.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "School of Computer Science", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Structured Convolutional Kernel Networks for Airline Crew Scheduling", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9175", "id": "9175", "proceeding": "http://proceedings.mlr.press/v139/yaakoubi21a.html", "slides": "/media/icml-2021/Slides/9175.pdf", "author_site": "Yassine Yaakoubi, Francois Soumis, Simon Lacoste-Julien", "author": "Yassine Yaakoubi; Francois Soumis; Simon Lacoste-Julien", "abstract": "Motivated by the needs from an airline crew scheduling application, we introduce structured convolutional kernel networks (Struct-CKN), which combine CKNs from Mairal et al. (2014) in a structured prediction framework that supports constraints on the outputs. CKNs are a particular kind of convolutional neural networks that approximate a kernel feature map on training data, thus combining properties of deep learning with the non-parametric flexibility of kernel methods. Extending CKNs to structured outputs allows us to obtain useful initial solutions on a flight-connection dataset that can be further refined by an airline crew scheduling solver. More specifically, we use a flight-based network modeled as a general conditional random field capable of incorporating local constraints in the learning process. 
Our experiments demonstrate that this approach yields significant improvements for the large-scale crew pairing problem (50,000 flights per month) over standard approaches, reducing the solution cost by 17% (a gain of millions of dollars) and the cost of global constraints by 97%.", "bibtex": "@InProceedings{pmlr-v139-yaakoubi21a,\n title = \t {Structured Convolutional Kernel Networks for Airline Crew Scheduling},\n author = {Yaakoubi, Yassine and Soumis, Francois and Lacoste-Julien, Simon},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11626--11636},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yaakoubi21a/yaakoubi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yaakoubi21a.html},\n abstract = \t {Motivated by the needs from an airline crew scheduling application, we introduce structured convolutional kernel networks (Struct-CKN), which combine CKNs from Mairal et al. (2014) in a structured prediction framework that supports constraints on the outputs. CKNs are a particular kind of convolutional neural networks that approximate a kernel feature map on training data, thus combining properties of deep learning with the non-parametric flexibility of kernel methods. Extending CKNs to structured outputs allows us to obtain useful initial solutions on a flight-connection dataset that can be further refined by an airline crew scheduling solver. More specifically, we use a flight-based network modeled as a general conditional random field capable of incorporating local constraints in the learning process. 
Our experiments demonstrate that this approach yields significant improvements for the large-scale crew pairing problem (50,000 flights per month) over standard approaches, reducing the solution cost by 17% (a gain of millions of dollars) and the cost of global constraints by 97%.}\n}", "pdf": "http://proceedings.mlr.press/v139/yaakoubi21a/yaakoubi21a.pdf", "supp": "", "pdf_size": 386165, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6467944180520163376&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "GERAD, Polytechnique Montr\u00e9al, Canada+Mila, Canada; GERAD, Polytechnique Montr\u00e9al, Canada; Mila, Canada+Department of Computer Science and Operations Research, Universit\u00e9 de Montr\u00e9al, Canada", "aff_domain": "outlook.com; ; ", "email": "outlook.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/yaakoubi21a.html", "aff_unique_index": "0+1;0;1+2", "aff_unique_norm": "Polytechnique Montr\u00e9al;Mila;Universit\u00e9 de Montr\u00e9al", "aff_unique_dep": "GERAD;;Department of Computer Science and Operations Research", "aff_unique_url": "https://www.polymtl.ca;https://mila.quebec;https://www.umontreal.ca", "aff_unique_abbr": "Polytechnique;Mila;UdeM", "aff_campus_unique_index": "0;0;", "aff_campus_unique": "Montr\u00e9al;", "aff_country_unique_index": "0+0;0;0+0", "aff_country_unique": "Canada" }, { "title": "Structured World Belief for Reinforcement Learning in POMDP", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9539", "id": "9539", "proceeding": "http://proceedings.mlr.press/v139/singh21a.html", "slides": "/media/icml-2021/Slides/9539.pdf", "author_site": "Gautam Singh, Skand Peri, Junghyun Kim, Hyunseok Kim, Sungjin Ahn", "author": "Gautam Singh; Skand Peri; Junghyun Kim; Hyunseok Kim; Sungjin Ahn", "abstract": "Object-centric world models provide structured representation of the scene and can be an important backbone in reinforcement learning and planning. However, existing approaches suffer in partially-observable environments due to the lack of belief states. In this paper, we propose Structured World Belief, a model for learning and inference of object-centric belief states. Inferred by Sequential Monte Carlo (SMC), our belief states provide multiple object-centric scene hypotheses. To synergize the benefits of SMC particles with object representations, we also propose a new object-centric dynamics model that considers the inductive bias of object permanence. This enables tracking of object states even when they are invisible for a long time. To further facilitate object tracking in this regime, we allow our model to attend flexibly to any spatial location in the image which was restricted in previous models. In experiments, we show that object-centric belief provides a more accurate and robust performance for filtering and generation. 
Furthermore, we show the efficacy of structured world belief in improving the performance of reinforcement learning, planning and supervised reasoning.", "bibtex": "@InProceedings{pmlr-v139-singh21a,\n title = \t {Structured World Belief for Reinforcement Learning in POMDP},\n author = {Singh, Gautam and Peri, Skand and Kim, Junghyun and Kim, Hyunseok and Ahn, Sungjin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9744--9755},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/singh21a/singh21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/singh21a.html},\n abstract = \t {Object-centric world models provide structured representation of the scene and can be an important backbone in reinforcement learning and planning. However, existing approaches suffer in partially-observable environments due to the lack of belief states. In this paper, we propose Structured World Belief, a model for learning and inference of object-centric belief states. Inferred by Sequential Monte Carlo (SMC), our belief states provide multiple object-centric scene hypotheses. To synergize the benefits of SMC particles with object representations, we also propose a new object-centric dynamics model that considers the inductive bias of object permanence. This enables tracking of object states even when they are invisible for a long time. To further facilitate object tracking in this regime, we allow our model to attend flexibly to any spatial location in the image which was restricted in previous models. In experiments, we show that object-centric belief provides a more accurate and robust performance for filtering and generation. 
Furthermore, we show the efficacy of structured world belief in improving the performance of reinforcement learning, planning and supervised reasoning.}\n}", "pdf": "http://proceedings.mlr.press/v139/singh21a/singh21a.pdf", "supp": "", "pdf_size": 1056844, "gs_citation": 37, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8772963346230054019&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, Rutgers University; Department of Computer Science, Rutgers University; Department of Computer Science, Rutgers University; Electronics and Telecommunications Research Institute; Department of Computer Science, Rutgers University + Rutgers Center for Cognitive Science", "aff_domain": "cs.rutgers.edu; ; ; ;gmail.com", "email": "cs.rutgers.edu; ; ; ;gmail.com", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/singh21a.html", "aff_unique_index": "0;0;0;1;0+0", "aff_unique_norm": "Rutgers University;Electronics and Telecommunications Research Institute", "aff_unique_dep": "Department of Computer Science;", "aff_unique_url": "https://www.rutgers.edu;http://www.etri.re.kr", "aff_unique_abbr": "Rutgers;ETRI", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;1;0+0", "aff_country_unique": "United States;South Korea" }, { "title": "Submodular Maximization subject to a Knapsack Constraint: Combinatorial Algorithms with Near-optimal Adaptive Complexity", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10581", "id": "10581", "proceeding": "http://proceedings.mlr.press/v139/amanatidis21a.html", "slides": "", "author_site": "Georgios Amanatidis, Federico Fusco, Philip Lazos, Stefano Leonardi, Alberto Marchetti-Spaccamela, Rebecca Reiffenh\u00e4user", "author": "Georgios Amanatidis; Federico Fusco; Philip Lazos; Stefano Leonardi; Alberto Marchetti-Spaccamela; Rebecca Reiffenh\u00e4user", "abstract": "The growing need to deal with massive instances motivates the design of algorithms balancing the quality of the solution with applicability. For the latter, an important measure is the \\emph{adaptive complexity}, capturing the number of sequential rounds of parallel computation needed. In this work we obtain the first \\emph{constant factor} approximation algorithm for non-monotone submodular maximization subject to a knapsack constraint with \\emph{near-optimal} $O(\\log n)$ adaptive complexity. Low adaptivity by itself, however, is not enough: one needs to account for the total number of function evaluations (or value queries) as well. Our algorithm asks $\\tilde{O}(n^2)$ value queries, but can be modified to run with only $\\tilde{O}(n)$ instead, while retaining a low adaptive complexity of $O(\\log^2n)$. Besides the above improvement in adaptivity, this is also the first \\emph{combinatorial} approach with sublinear adaptive complexity for the problem and yields algorithms comparable to the state-of-the-art even for the special cases of cardinality constraints or monotone objectives. 
Finally, we showcase our algorithms\u2019 applicability on real-world datasets.", "bibtex": "@InProceedings{pmlr-v139-amanatidis21a,\n title = \t {Submodular Maximization subject to a Knapsack Constraint: Combinatorial Algorithms with Near-optimal Adaptive Complexity},\n author = {Amanatidis, Georgios and Fusco, Federico and Lazos, Philip and Leonardi, Stefano and Marchetti-Spaccamela, Alberto and Reiffenh{\\\"a}user, Rebecca},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {231--242},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/amanatidis21a/amanatidis21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/amanatidis21a.html},\n abstract = \t {The growing need to deal with massive instances motivates the design of algorithms balancing the quality of the solution with applicability. For the latter, an important measure is the \\emph{adaptive complexity}, capturing the number of sequential rounds of parallel computation needed. In this work we obtain the first \\emph{constant factor} approximation algorithm for non-monotone submodular maximization subject to a knapsack constraint with \\emph{near-optimal} $O(\\log n)$ adaptive complexity. Low adaptivity by itself, however, is not enough: one needs to account for the total number of function evaluations (or value queries) as well. Our algorithm asks $\\tilde{O}(n^2)$ value queries, but can be modified to run with only $\\tilde{O}(n)$ instead, while retaining a low adaptive complexity of $O(\\log^2n)$. Besides the above improvement in adaptivity, this is also the first \\emph{combinatorial} approach with sublinear adaptive complexity for the problem and yields algorithms comparable to the state-of-the-art even for the special cases of cardinality constraints or monotone objectives. 
Finally, we showcase our algorithms\u2019 applicability on real-world datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/amanatidis21a/amanatidis21a.pdf", "supp": "", "pdf_size": 777341, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16333111153072864505&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "University of Essex, United Kingdom; Sapienza University of Rome, Italy; Sapienza University of Rome, Italy; Sapienza University of Rome, Italy; Sapienza University of Rome, Italy; Sapienza University of Rome, Italy", "aff_domain": "diag.uniroma1.it; ; ; ; ; ", "email": "diag.uniroma1.it; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/amanatidis21a.html", "aff_unique_index": "0;1;1;1;1;1", "aff_unique_norm": "University of Essex;Sapienza University of Rome", "aff_unique_dep": ";", "aff_unique_url": "https://www.essex.ac.uk;https://www.uniroma1.it", "aff_unique_abbr": "Essex;Sapienza", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;1;1;1;1", "aff_country_unique": "United Kingdom;Italy" }, { "title": "Supervised Tree-Wasserstein Distance", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10645", "id": "10645", "proceeding": "http://proceedings.mlr.press/v139/takezawa21a.html", "slides": "/media/icml-2021/Slides/10645_KIP3fFF.pdf", "author_site": "Yuki Takezawa, Ryoma Sato, Makoto Yamada", "author": "Yuki Takezawa; Ryoma Sato; Makoto Yamada", "abstract": "To measure the similarity of documents, the Wasserstein distance is a powerful tool, but it requires a high computational cost. Recently, for fast computation of the Wasserstein distance, methods for approximating the Wasserstein distance using a tree metric have been proposed. These tree-based methods allow fast comparisons of a large number of documents; however, they are unsupervised and do not learn task-specific distances. In this work, we propose the Supervised Tree-Wasserstein (STW) distance, a fast, supervised metric learning method based on the tree metric. Specifically, we rewrite the Wasserstein distance on the tree metric by the parent-child relationships of a tree, and formulate it as a continuous optimization problem using a contrastive loss. Experimentally, we show that the STW distance can be computed fast, and improves the accuracy of document classification tasks. Furthermore, the STW distance is formulated by matrix multiplications, runs on a GPU, and is suitable for batch processing. Therefore, we show that the STW distance is extremely efficient when comparing a large number of documents.", "bibtex": "@InProceedings{pmlr-v139-takezawa21a,\n title = \t {Supervised Tree-Wasserstein Distance},\n author = {Takezawa, Yuki and Sato, Ryoma and Yamada, Makoto},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10086--10095},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/takezawa21a/takezawa21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/takezawa21a.html},\n abstract = \t {To measure the similarity of documents, the Wasserstein distance is a powerful tool, but it requires a high computational cost. 
Recently, for fast computation of the Wasserstein distance, methods for approximating the Wasserstein distance using a tree metric have been proposed. These tree-based methods allow fast comparisons of a large number of documents; however, they are unsupervised and do not learn task-specific distances. In this work, we propose the Supervised Tree-Wasserstein (STW) distance, a fast, supervised metric learning method based on the tree metric. Specifically, we rewrite the Wasserstein distance on the tree metric by the parent-child relationships of a tree, and formulate it as a continuous optimization problem using a contrastive loss. Experimentally, we show that the STW distance can be computed fast, and improves the accuracy of document classification tasks. Furthermore, the STW distance is formulated by matrix multiplications, runs on a GPU, and is suitable for batch processing. Therefore, we show that the STW distance is extremely efficient when comparing a large number of documents.}\n}", "pdf": "http://proceedings.mlr.press/v139/takezawa21a/takezawa21a.pdf", "supp": "", "pdf_size": 1818630, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=551412745035931567&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Kyoto University+RIKEN AIP; Kyoto University+RIKEN AIP; Kyoto University+RIKEN AIP", "aff_domain": "ml.ist.i.kyoto-u.ac.jp; ; ", "email": "ml.ist.i.kyoto-u.ac.jp; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/takezawa21a.html", "aff_unique_index": "0+1;0+1;0+1", "aff_unique_norm": "Kyoto University;RIKEN", "aff_unique_dep": ";Advanced Institute for Computational Science", "aff_unique_url": "https://www.kyoto-u.ac.jp;https://www.aip.riken.jp", "aff_unique_abbr": "Kyoto U;RIKEN AIP", "aff_campus_unique_index": ";;", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0+0;0+0", "aff_country_unique": "Japan" }, { "title": "Symmetric Spaces for Graph Embeddings: A Finsler-Riemannian Approach", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9683", "id": "9683", "proceeding": "http://proceedings.mlr.press/v139/lopez21a.html", "slides": "/media/icml-2021/Slides/9683.pdf", "author_site": "Federico Lopez, Beatrice Pozzetti, Steve Trettel, Michael Strube, Anna Wienhard", "author": "Federico Lopez; Beatrice Pozzetti; Steve Trettel; Michael Strube; Anna Wienhard", "abstract": "Learning faithful graph representations as sets of vertex embeddings has become a fundamental intermediary step in a wide range of machine learning applications. We propose the systematic use of symmetric spaces in representation learning, a class encompassing many of the previously used embedding targets. This enables us to introduce a new method, the use of Finsler metrics integrated in a Riemannian optimization scheme, that better adapts to dissimilar structures in the graph. We develop a tool to analyze the embeddings and infer structural properties of the data sets. For implementation, we choose Siegel spaces, a versatile family of symmetric spaces. Our approach outperforms competitive baselines for graph reconstruction tasks on various synthetic and real-world datasets. 
We further demonstrate its applicability on two downstream tasks, recommender systems and node classification.", "bibtex": "@InProceedings{pmlr-v139-lopez21a,\n title = \t {Symmetric Spaces for Graph Embeddings: A Finsler-Riemannian Approach},\n author = {Lopez, Federico and Pozzetti, Beatrice and Trettel, Steve and Strube, Michael and Wienhard, Anna},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7090--7101},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lopez21a/lopez21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lopez21a.html},\n abstract = \t {Learning faithful graph representations as sets of vertex embeddings has become a fundamental intermediary step in a wide range of machine learning applications. We propose the systematic use of symmetric spaces in representation learning, a class encompassing many of the previously used embedding targets. This enables us to introduce a new method, the use of Finsler metrics integrated in a Riemannian optimization scheme, that better adapts to dissimilar structures in the graph. We develop a tool to analyze the embeddings and infer structural properties of the data sets. For implementation, we choose Siegel spaces, a versatile family of symmetric spaces. Our approach outperforms competitive baselines for graph reconstruction tasks on various synthetic and real-world datasets. We further demonstrate its applicability on two downstream tasks, recommender systems and node classification.}\n}", "pdf": "http://proceedings.mlr.press/v139/lopez21a/lopez21a.pdf", "supp": "", "pdf_size": 5114141, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12337649232069613673&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Heidelberg Institute for Theoretical Studies, Heidelberg, Germany+Mathematical Institute, Heidelberg University, Heidelberg, Germany; Mathematical Institute, Heidelberg University, Heidelberg, Germany; Department of Mathematics, Stanford University, California, USA; Heidelberg Institute for Theoretical Studies, Heidelberg, Germany; Mathematical Institute, Heidelberg University, Heidelberg, Germany", "aff_domain": "h-its.org; ; ; ; ", "email": "h-its.org; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/lopez21a.html", "aff_unique_index": "0+1;1;2;0;1", "aff_unique_norm": "Heidelberg Institute for Theoretical Studies;Heidelberg University;Stanford University", "aff_unique_dep": ";Mathematical Institute;Department of Mathematics", "aff_unique_url": "https://www.hits.org/;https://www.uni-heidelberg.de;https://www.stanford.edu", "aff_unique_abbr": "HITS;Uni Heidelberg;Stanford", "aff_campus_unique_index": "0+0;0;1;0;0", "aff_campus_unique": "Heidelberg;California", "aff_country_unique_index": "0+0;0;1;0;0", "aff_country_unique": "Germany;United States" }, { "title": "Synthesizer: Rethinking Self-Attention for Transformer Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9307", "id": "9307", "proceeding": "http://proceedings.mlr.press/v139/tay21a.html", "slides": "", "author_site": "Yi Tay, Dara Bahri, Don Metzler, Da-Cheng Juan, Zhe Zhao, Che Zheng", "author": "Yi Tay; Dara Bahri; Donald Metzler; Da-Cheng Juan; Zhe Zhao; Che Zheng", "abstract": "The dot 
product self-attention is known to be central and indispensable to state-of-the-art Transformer models. But is it really required? This paper investigates the true importance and contribution of the dot product-based self-attention mechanism on the performance of Transformer models. Via extensive experiments, we find that (1) random alignment matrices surprisingly perform quite competitively and (2) learning attention weights from token-token (query-key) interactions is useful but not that important after all. To this end, we propose \\textsc{Synthesizer}, a model that learns synthetic attention weights without token-token interactions. In our experiments, we first show that simple Synthesizers achieve highly competitive performance when compared against vanilla Transformer models across a range of tasks, including machine translation, language modeling, text generation and GLUE/SuperGLUE benchmarks. When composed with dot product attention, we find that Synthesizers consistently outperform Transformers. Moreover, we conduct additional comparisons of Synthesizers against Dynamic Convolutions, showing that simple Random Synthesizer is not only $60%$ faster but also improves perplexity by a relative $3.5%$. Finally, we show that simple factorized Synthesizers can outperform Linformers on encoding only tasks.", "bibtex": "@InProceedings{pmlr-v139-tay21a,\n title = \t {Synthesizer: Rethinking Self-Attention for Transformer Models},\n author = {Tay, Yi and Bahri, Dara and Metzler, Donald and Juan, Da-Cheng and Zhao, Zhe and Zheng, Che},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10183--10192},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tay21a/tay21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/tay21a.html},\n abstract = \t {The dot product self-attention is known to be central and indispensable to state-of-the-art Transformer models. But is it really required? This paper investigates the true importance and contribution of the dot product-based self-attention mechanism on the performance of Transformer models. Via extensive experiments, we find that (1) random alignment matrices surprisingly perform quite competitively and (2) learning attention weights from token-token (query-key) interactions is useful but not that important after all. To this end, we propose \\textsc{Synthesizer}, a model that learns synthetic attention weights without token-token interactions. In our experiments, we first show that simple Synthesizers achieve highly competitive performance when compared against vanilla Transformer models across a range of tasks, including machine translation, language modeling, text generation and GLUE/SuperGLUE benchmarks. When composed with dot product attention, we find that Synthesizers consistently outperform Transformers. Moreover, we conduct additional comparisons of Synthesizers against Dynamic Convolutions, showing that simple Random Synthesizer is not only $60%$ faster but also improves perplexity by a relative $3.5%$. 
Finally, we show that simple factorized Synthesizers can outperform Linformers on encoding only tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/tay21a/tay21a.pdf", "supp": "", "pdf_size": 694274, "gs_citation": 426, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13387830876140432247&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Google Research, Mountain View, California; Google Research, Mountain View, California; Google Research, Mountain View, California; Google Research, Mountain View, California; Google Research, Mountain View, California; Google Research, Mountain View, California", "aff_domain": "google.com; ; ; ; ; ", "email": "google.com; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/tay21a.html", "aff_unique_index": "0;0;0;0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google", "aff_campus_unique_index": "0;0;0;0;0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Systematic Analysis of Cluster Similarity Indices: How to Validate Validation Measures", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8429", "id": "8429", "proceeding": "http://proceedings.mlr.press/v139/gosgens21a.html", "slides": "", "author_site": "Martijn G\u00f6sgens, Aleksei Tikhonov, Liudmila Prokhorenkova", "author": "Martijn M G\u00f6sgens; Alexey Tikhonov; Liudmila Prokhorenkova", "abstract": "Many cluster similarity indices are used to evaluate clustering algorithms, and choosing the best one for a particular task remains an open problem. We demonstrate that this problem is crucial: there are many disagreements among the indices, these disagreements do affect which algorithms are preferred in applications, and this can lead to degraded performance in real-world systems. We propose a theoretical framework to tackle this problem: we develop a list of desirable properties and conduct an extensive theoretical analysis to verify which indices satisfy them. This allows for making an informed choice: given a particular application, one can first select properties that are desirable for the task and then identify indices satisfying these. Our work unifies and considerably extends existing attempts at analyzing cluster similarity indices: we introduce new properties, formalize existing ones, and mathematically prove or disprove each property for an extensive list of validation indices. This broader and more rigorous approach leads to recommendations that considerably differ from how validation indices are currently being chosen by practitioners. 
Some of the most popular indices are even shown to be dominated by previously overlooked ones.", "bibtex": "@InProceedings{pmlr-v139-gosgens21a,\n title = \t {Systematic Analysis of Cluster Similarity Indices: How to Validate Validation Measures},\n author = {G{\\\"o}sgens, Martijn M and Tikhonov, Alexey and Prokhorenkova, Liudmila},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3799--3808},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gosgens21a/gosgens21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/gosgens21a.html},\n abstract = \t {Many cluster similarity indices are used to evaluate clustering algorithms, and choosing the best one for a particular task remains an open problem. We demonstrate that this problem is crucial: there are many disagreements among the indices, these disagreements do affect which algorithms are preferred in applications, and this can lead to degraded performance in real-world systems. We propose a theoretical framework to tackle this problem: we develop a list of desirable properties and conduct an extensive theoretical analysis to verify which indices satisfy them. This allows for making an informed choice: given a particular application, one can first select properties that are desirable for the task and then identify indices satisfying these. Our work unifies and considerably extends existing attempts at analyzing cluster similarity indices: we introduce new properties, formalize existing ones, and mathematically prove or disprove each property for an extensive list of validation indices. This broader and more rigorous approach leads to recommendations that considerably differ from how validation indices are currently being chosen by practitioners. 
Some of the most popular indices are even shown to be dominated by previously overlooked ones.}\n}", "pdf": "http://proceedings.mlr.press/v139/gosgens21a/gosgens21a.pdf", "supp": "", "pdf_size": 351900, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2257720371580982697&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Eindhoven University of Technology, Eindhoven, The Netherlands; Yandex, Berlin, Germany; Yandex, Moscow, Russia + Moscow Institute of Physics and Technology, Moscow, Russia + HSE University, Moscow, Russia", "aff_domain": "martijngosgens.nl; ; ", "email": "martijngosgens.nl; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/gosgens21a.html", "aff_unique_index": "0;1;1+2+3", "aff_unique_norm": "Eindhoven University of Technology;Yandex;Moscow Institute of Physics and Technology;HSE University", "aff_unique_dep": ";;;", "aff_unique_url": "https://www.tue.nl;https://yandex.com;https://www.mipt.ru/en;https://hse.ru", "aff_unique_abbr": "TU/e;Yandex;MIPT;HSE", "aff_campus_unique_index": "0;1;2+2+2", "aff_campus_unique": "Eindhoven;Berlin;Moscow", "aff_country_unique_index": "0;1;2+2+2", "aff_country_unique": "Netherlands;Germany;Russian Federation" }, { "title": "T-SCI: A Two-Stage Conformal Inference Algorithm with Guaranteed Coverage for Cox-MLP", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10303", "id": "10303", "proceeding": "http://proceedings.mlr.press/v139/teng21a.html", "slides": "", "author_site": "Jiaye Teng, Zeren Tan, Yang Yuan", "author": "Jiaye Teng; Zeren Tan; Yang Yuan", "abstract": "It is challenging to deal with censored data, where we only have access to the incomplete information of survival time instead of its exact value. Fortunately, under linear predictor assumption, people can obtain guaranteed coverage for the confidence interval of survival time using methods like Cox Regression. However, when relaxing the linear assumption with neural networks (e.g., Cox-MLP \\citep{katzman2018deepsurv,kvamme2019time}), we lose the guaranteed coverage. To recover the guaranteed coverage without linear assumption, we propose two algorithms based on conformal inference. In the first algorithm \\emph{WCCI}, we revisit weighted conformal inference and introduce a new non-conformity score based on partial likelihood. We then propose a two-stage algorithm \\emph{T-SCI}, where we run WCCI in the first stage and apply quantile conformal inference to calibrate the results in the second stage. Theoretical analysis shows that T-SCI returns guaranteed coverage under milder assumptions than WCCI. 
We conduct extensive experiments on synthetic data and real data using different methods, which validate our analysis.", "bibtex": "@InProceedings{pmlr-v139-teng21a,\n title = \t {T-SCI: A Two-Stage Conformal Inference Algorithm with Guaranteed Coverage for Cox-MLP},\n author = {Teng, Jiaye and Tan, Zeren and Yuan, Yang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10203--10213},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/teng21a/teng21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/teng21a.html},\n abstract = \t {It is challenging to deal with censored data, where we only have access to the incomplete information of survival time instead of its exact value. Fortunately, under linear predictor assumption, people can obtain guaranteed coverage for the confidence interval of survival time using methods like Cox Regression. However, when relaxing the linear assumption with neural networks (e.g., Cox-MLP \\citep{katzman2018deepsurv,kvamme2019time}), we lose the guaranteed coverage. To recover the guaranteed coverage without linear assumption, we propose two algorithms based on conformal inference. In the first algorithm \\emph{WCCI}, we revisit weighted conformal inference and introduce a new non-conformity score based on partial likelihood. We then propose a two-stage algorithm \\emph{T-SCI}, where we run WCCI in the first stage and apply quantile conformal inference to calibrate the results in the second stage. Theoretical analysis shows that T-SCI returns guaranteed coverage under milder assumptions than WCCI. 
We conduct extensive experiments on synthetic data and real data using different methods, which validate our analysis.}\n}", "pdf": "http://proceedings.mlr.press/v139/teng21a/teng21a.pdf", "supp": "", "pdf_size": 5063878, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1012431253971456969&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China+Shanghai Qi Zhi Institute, Shanghai, China; Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China+Shanghai Qi Zhi Institute, Shanghai, China; Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China+Shanghai Qi Zhi Institute, Shanghai, China", "aff_domain": "tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn", "email": "tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/teng21a.html", "aff_unique_index": "0+1;0+1;0+1", "aff_unique_norm": "Tsinghua University;Shanghai Qi Zhi Institute", "aff_unique_dep": "Institute for Interdisciplinary Information Sciences;", "aff_unique_url": "https://www.tsinghua.edu.cn;", "aff_unique_abbr": "Tsinghua;", "aff_campus_unique_index": "0+1;0+1;0+1", "aff_campus_unique": "Beijing;Shanghai", "aff_country_unique_index": "0+0;0+0;0+0", "aff_country_unique": "China" }, { "title": "TFix: Learning to Fix Coding Errors with a Text-to-Text Transformer", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8789", "id": "8789", "proceeding": "http://proceedings.mlr.press/v139/berabi21a.html", "slides": "/media/icml-2021/Slides/8789.pdf", "author_site": "Berkay Berabi, Jingxuan He, Veselin Raychev, Martin Vechev", "author": "Berkay Berabi; Jingxuan He; Veselin Raychev; Martin Vechev", "abstract": "The problem of fixing errors in programs has attracted substantial interest over the years. The key challenge for building an effective code fixing tool is to capture a wide range of errors and meanwhile maintain high accuracy. In this paper, we address this challenge and present a new learning-based system, called TFix. TFix works directly on program text and phrases the problem of code fixing as a text-to-text task. In turn, this enables it to leverage a powerful Transformer based model pre-trained on natural language and fine-tuned to generate code fixes (via a large, high-quality dataset obtained from GitHub commits). TFix is not specific to a particular programming language or class of defects and, in fact, improved its precision by simultaneously fine-tuning on 52 different error types reported by a popular static analyzer. 
Our evaluation on a massive dataset of JavaScript programs shows that TFix is practically effective: it is able to synthesize code that fixes the error in \u00a067 percent of cases and significantly outperforms existing learning-based approaches.", "bibtex": "@InProceedings{pmlr-v139-berabi21a,\n title = \t {TFix: Learning to Fix Coding Errors with a Text-to-Text Transformer},\n author = {Berabi, Berkay and He, Jingxuan and Raychev, Veselin and Vechev, Martin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {780--791},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/berabi21a/berabi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/berabi21a.html},\n abstract = \t {The problem of fixing errors in programs has attracted substantial interest over the years. The key challenge for building an effective code fixing tool is to capture a wide range of errors and meanwhile maintain high accuracy. In this paper, we address this challenge and present a new learning-based system, called TFix. TFix works directly on program text and phrases the problem of code fixing as a text-to-text task. In turn, this enables it to leverage a powerful Transformer based model pre-trained on natural language and fine-tuned to generate code fixes (via a large, high-quality dataset obtained from GitHub commits). TFix is not specific to a particular programming language or class of defects and, in fact, improved its precision by simultaneously fine-tuning on 52 different error types reported by a popular static analyzer. Our evaluation on a massive dataset of JavaScript programs shows that TFix is practically effective: it is able to synthesize code that fixes the error in \u00a067 percent of cases and significantly outperforms existing learning-based approaches.}\n}", "pdf": "http://proceedings.mlr.press/v139/berabi21a/berabi21a.pdf", "supp": "", "pdf_size": 477038, "gs_citation": 166, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1256847814799504622&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Department of Computer Science, ETH Zurich, Switzerland + Snyk, Switzerland; Department of Computer Science, ETH Zurich, Switzerland; Department of Computer Science, ETH Zurich, Switzerland + Snyk, Switzerland; Department of Computer Science, ETH Zurich, Switzerland", "aff_domain": "gmail.com;inf.ethz.ch; ; ", "email": "gmail.com;inf.ethz.ch; ; ", "github": "https://github.com/eth-sri/TFix", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/berabi21a.html", "aff_unique_index": "0+1;0;0+1;0", "aff_unique_norm": "ETH Zurich;Snyk", "aff_unique_dep": "Department of Computer Science;", "aff_unique_url": "https://www.ethz.ch;https://snyk.io", "aff_unique_abbr": "ETHZ;Snyk", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0+0;0", "aff_country_unique": "Switzerland" }, { "title": "Targeted Data Acquisition for Evolving Negotiation Agents", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9211", "id": "9211", "proceeding": "http://proceedings.mlr.press/v139/kwon21a.html", "slides": "/media/icml-2021/Slides/9211.pdf", "author_site": "Minae Kwon, Siddharth Karamcheti, Mariano-Florentino Cuellar, Dorsa Sadigh", "author": "Minae Kwon; Siddharth Karamcheti; 
Mariano-Florentino Cuellar; Dorsa Sadigh", "abstract": "Successful negotiators must learn how to balance optimizing for self-interest and cooperation. Yet current artificial negotiation agents often heavily depend on the quality of the static datasets they were trained on, limiting their capacity to fashion an adaptive response balancing self-interest and cooperation. For this reason, we find that these agents can achieve either high utility or cooperation, but not both. To address this, we introduce a targeted data acquisition framework where we guide the exploration of a reinforcement learning agent using annotations from an expert oracle. The guided exploration incentivizes the learning agent to go beyond its static dataset and develop new negotiation strategies. We show that this enables our agents to obtain higher-reward and more Pareto-optimal solutions when negotiating with both simulated and human partners compared to standard supervised learning and reinforcement learning methods. This trend additionally holds when comparing agents using our targeted data acquisition framework to variants of agents trained with a mix of supervised learning and reinforcement learning, or to agents using tailored reward functions that explicitly optimize for utility and Pareto-optimality.", "bibtex": "@InProceedings{pmlr-v139-kwon21a,\n title = \t {Targeted Data Acquisition for Evolving Negotiation Agents},\n author = {Kwon, Minae and Karamcheti, Siddharth and Cuellar, Mariano-Florentino and Sadigh, Dorsa},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5894--5904},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kwon21a/kwon21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kwon21a.html},\n abstract = \t {Successful negotiators must learn how to balance optimizing for self-interest and cooperation. Yet current artificial negotiation agents often heavily depend on the quality of the static datasets they were trained on, limiting their capacity to fashion an adaptive response balancing self-interest and cooperation. For this reason, we find that these agents can achieve either high utility or cooperation, but not both. To address this, we introduce a targeted data acquisition framework where we guide the exploration of a reinforcement learning agent using annotations from an expert oracle. The guided exploration incentivizes the learning agent to go beyond its static dataset and develop new negotiation strategies. We show that this enables our agents to obtain higher-reward and more Pareto-optimal solutions when negotiating with both simulated and human partners compared to standard supervised learning and reinforcement learning methods. 
This trend additionally holds when comparing agents using our targeted data acquisition framework to variants of agents trained with a mix of supervised learning and reinforcement learning, or to agents using tailored reward functions that explicitly optimize for utility and Pareto-optimality.}\n}", "pdf": "http://proceedings.mlr.press/v139/kwon21a/kwon21a.pdf", "supp": "", "pdf_size": 2596566, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12045473614903856867&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; School of Law, Stanford University; Department of Computer Science, Stanford University", "aff_domain": "cs.stanford.edu; ; ; ", "email": "cs.stanford.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/kwon21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Task-Optimal Exploration in Linear Dynamical Systems", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8917", "id": "8917", "proceeding": "http://proceedings.mlr.press/v139/wagenmaker21a.html", "slides": "", "author_site": "Andrew Wagenmaker, Max Simchowitz, Kevin Jamieson", "author": "Andrew J Wagenmaker; Max Simchowitz; Kevin Jamieson", "abstract": "Exploration in unknown environments is a fundamental problem in reinforcement learning and control. In this work, we study task-guided exploration and determine what precisely an agent must learn about their environment in order to complete a particular task. Formally, we study a broad class of decision-making problems in the setting of linear dynamical systems, a class that includes the linear quadratic regulator problem. We provide instance- and task-dependent lower bounds which explicitly quantify the difficulty of completing a task of interest. Motivated by our lower bound, we propose a computationally efficient experiment-design based exploration algorithm. We show that it optimally explores the environment, collecting precisely the information needed to complete the task, and provide finite-time bounds guaranteeing that it achieves the instance- and task-optimal sample complexity, up to constant factors. Through several examples of the linear quadratic regulator problem, we show that performing task-guided exploration provably improves on exploration schemes which do not take into account the task of interest. Along the way, we establish that certainty equivalence decision making is instance- and task-optimal, and obtain the first algorithm for the linear quadratic regulator problem which is instance-optimal. 
We conclude with several experiments illustrating the effectiveness of our approach in practice.", "bibtex": "@InProceedings{pmlr-v139-wagenmaker21a,\n title = \t {Task-Optimal Exploration in Linear Dynamical Systems},\n author = {Wagenmaker, Andrew J and Simchowitz, Max and Jamieson, Kevin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10641--10652},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wagenmaker21a/wagenmaker21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/wagenmaker21a.html},\n abstract = \t {Exploration in unknown environments is a fundamental problem in reinforcement learning and control. In this work, we study task-guided exploration and determine what precisely an agent must learn about their environment in order to complete a particular task. Formally, we study a broad class of decision-making problems in the setting of linear dynamical systems, a class that includes the linear quadratic regulator problem. We provide instance- and task-dependent lower bounds which explicitly quantify the difficulty of completing a task of interest. Motivated by our lower bound, we propose a computationally efficient experiment-design based exploration algorithm. We show that it optimally explores the environment, collecting precisely the information needed to complete the task, and provide finite-time bounds guaranteeing that it achieves the instance- and task-optimal sample complexity, up to constant factors. Through several examples of the linear quadratic regulator problem, we show that performing task-guided exploration provably improves on exploration schemes which do not take into account the task of interest. Along the way, we establish that certainty equivalence decision making is instance- and task-optimal, and obtain the first algorithm for the linear quadratic regulator problem which is instance-optimal. We conclude with several experiments illustrating the effectiveness of our approach in practice.}\n}", "pdf": "http://proceedings.mlr.press/v139/wagenmaker21a/wagenmaker21a.pdf", "supp": "", "pdf_size": 1285378, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12462325871895191132&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Paul G. Allen School of Computer Science & Engineering, University of Washington, Seattle, Washington, USA+1; Department of Electrical Engineering and Computer Science, University of California, Berkeley, California, USA+2; Paul G. Allen School of Computer Science & Engineering, University of Washington, Seattle, Washington, USA+1", "aff_domain": "cs.washington.edu; ; ", "email": "cs.washington.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/wagenmaker21a.html", "aff_unique_index": "0;2;0", "aff_unique_norm": "University of Washington;;University of California, Berkeley", "aff_unique_dep": "Paul G. 
Allen School of Computer Science & Engineering;;Department of Electrical Engineering and Computer Science", "aff_unique_url": "https://www.washington.edu;;https://www.berkeley.edu", "aff_unique_abbr": "UW;;UC Berkeley", "aff_campus_unique_index": "0;2;0", "aff_campus_unique": "Seattle;;Berkeley", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States;" }, { "title": "Taylor Expansion of Discount Factors", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10085", "id": "10085", "proceeding": "http://proceedings.mlr.press/v139/tang21b.html", "slides": "/media/icml-2021/Slides/10085_HIJXySS.pdf", "author_site": "Yunhao Tang, Mark Rowland, Remi Munos, Michal Valko", "author": "Yunhao Tang; Mark Rowland; Remi Munos; Michal Valko", "abstract": "In practical reinforcement learning (RL), the discount factor used for estimating value functions often differs from that used for defining the evaluation objective. In this work, we study the effect that this discrepancy of discount factors has during learning, and discover a family of objectives that interpolate value functions of two distinct discount factors. Our analysis suggests new ways for estimating value functions and performing policy optimization updates, which demonstrate empirical performance gains. This framework also leads to new insights on commonly-used deep RL heuristic modifications to policy optimization algorithms.", "bibtex": "@InProceedings{pmlr-v139-tang21b,\n title = \t {Taylor Expansion of Discount Factors},\n author = {Tang, Yunhao and Rowland, Mark and Munos, Remi and Valko, Michal},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10130--10140},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tang21b/tang21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/tang21b.html},\n abstract = \t {In practical reinforcement learning (RL), the discount factor used for estimating value functions often differs from that used for defining the evaluation objective. In this work, we study the effect that this discrepancy of discount factors has during learning, and discover a family of objectives that interpolate value functions of two distinct discount factors. Our analysis suggests new ways for estimating value functions and performing policy optimization updates, which demonstrate empirical performance gains. 
This framework also leads to new insights on commonly-used deep RL heuristic modifications to policy optimization algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v139/tang21b/tang21b.pdf", "supp": "", "pdf_size": 1477625, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17707423409975809524&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Columbia University, New York, USA; DeepMind, London, UK; DeepMind, Paris, France; DeepMind, Paris, France", "aff_domain": "columbia.edu; ; ; ", "email": "columbia.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/tang21b.html", "aff_unique_index": "0;1;1;1", "aff_unique_norm": "Columbia University;DeepMind", "aff_unique_dep": ";", "aff_unique_url": "https://www.columbia.edu;https://deepmind.com", "aff_unique_abbr": "Columbia;DeepMind", "aff_campus_unique_index": "0;1;2;2", "aff_campus_unique": "New York;London;Paris", "aff_country_unique_index": "0;1;2;2", "aff_country_unique": "United States;United Kingdom;France" }, { "title": "TeachMyAgent: a Benchmark for Automatic Curriculum Learning in Deep RL", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10321", "id": "10321", "proceeding": "http://proceedings.mlr.press/v139/romac21a.html", "slides": "/media/icml-2021/Slides/10321.pdf", "author_site": "Cl\u00e9ment Romac, R\u00e9my Portelas, Katja Hofmann, Pierre-Yves Oudeyer", "author": "Cl\u00e9ment Romac; R\u00e9my Portelas; Katja Hofmann; Pierre-Yves Oudeyer", "abstract": "Training autonomous agents able to generalize to multiple tasks is a key target of Deep Reinforcement Learning (DRL) research. In parallel to improving DRL algorithms themselves, Automatic Curriculum Learning (ACL) study how teacher algorithms can train DRL agents more efficiently by adapting task selection to their evolving abilities. While multiple standard benchmarks exist to compare DRL agents, there is currently no such thing for ACL algorithms. Thus, comparing existing approaches is difficult, as too many experimental parameters differ from paper to paper. In this work, we identify several key challenges faced by ACL algorithms. Based on these, we present TeachMyAgent (TA), a benchmark of current ACL algorithms leveraging procedural task generation. It includes 1) challenge-specific unit-tests using variants of a procedural Box2D bipedal walker environment, and 2) a new procedural Parkour environment combining most ACL challenges, making it ideal for global performance assessment. We then use TeachMyAgent to conduct a comparative study of representative existing approaches, showcasing the competitiveness of some ACL algorithms that do not use expert knowledge. We also show that the Parkour environment remains an open problem. 
We open-source our environments, all studied ACL algorithms (collected from open-source code or re-implemented), and DRL students in a Python package available at https://github.com/flowersteam/TeachMyAgent.", "bibtex": "@InProceedings{pmlr-v139-romac21a,\n title = \t {TeachMyAgent: a Benchmark for Automatic Curriculum Learning in Deep RL},\n author = {Romac, Cl{\\'e}ment and Portelas, R{\\'e}my and Hofmann, Katja and Oudeyer, Pierre-Yves},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9052--9063},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/romac21a/romac21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/romac21a.html},\n abstract = \t {Training autonomous agents able to generalize to multiple tasks is a key target of Deep Reinforcement Learning (DRL) research. In parallel to improving DRL algorithms themselves, Automatic Curriculum Learning (ACL) study how teacher algorithms can train DRL agents more efficiently by adapting task selection to their evolving abilities. While multiple standard benchmarks exist to compare DRL agents, there is currently no such thing for ACL algorithms. Thus, comparing existing approaches is difficult, as too many experimental parameters differ from paper to paper. In this work, we identify several key challenges faced by ACL algorithms. Based on these, we present TeachMyAgent (TA), a benchmark of current ACL algorithms leveraging procedural task generation. It includes 1) challenge-specific unit-tests using variants of a procedural Box2D bipedal walker environment, and 2) a new procedural Parkour environment combining most ACL challenges, making it ideal for global performance assessment. We then use TeachMyAgent to conduct a comparative study of representative existing approaches, showcasing the competitiveness of some ACL algorithms that do not use expert knowledge. We also show that the Parkour environment remains an open problem. 
We open-source our environments, all studied ACL algorithms (collected from open-source code or re-implemented), and DRL students in a Python package available at https://github.com/flowersteam/TeachMyAgent.}\n}", "pdf": "http://proceedings.mlr.press/v139/romac21a/romac21a.pdf", "supp": "", "pdf_size": 1291667, "gs_citation": 37, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11016662361926634008&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Inria, France; Inria, France; Microsoft Research, UK; Inria, France", "aff_domain": "inria.fr;inria.fr; ; ", "email": "inria.fr;inria.fr; ; ", "github": "https://github.com/flowersteam/TeachMyAgent", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/romac21a.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "INRIA;Microsoft", "aff_unique_dep": ";Microsoft Research", "aff_unique_url": "https://www.inria.fr;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "Inria;MSR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;1;0", "aff_country_unique": "France;United Kingdom" }, { "title": "TempoRL: Learning When to Act", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9879", "id": "9879", "proceeding": "http://proceedings.mlr.press/v139/biedenkapp21a.html", "slides": "/media/icml-2021/Slides/9879.pdf", "author_site": "Andr\u00e9 Biedenkapp, Raghu Rajan, Frank Hutter, Marius Lindauer", "author": "Andr\u00e9 Biedenkapp; Raghu Rajan; Frank Hutter; Marius Lindauer", "abstract": "Reinforcement learning is a powerful approach to learn behaviour through interactions with an environment. However, behaviours are usually learned in a purely reactive fashion, where an appropriate action is selected based on an observation. In this form, it is challenging to learn when it is necessary to execute new decisions. This makes learning inefficient especially in environments that need various degrees of fine and coarse control. To address this, we propose a proactive setting in which the agent not only selects an action in a state but also for how long to commit to that action. Our TempoRL approach introduces skip connections between states and learns a skip-policy for repeating the same action along these skips. We demonstrate the effectiveness of TempoRL on a variety of traditional and deep RL environments, showing that our approach is capable of learning successful policies up to an order of magnitude faster than vanilla Q-learning.", "bibtex": "@InProceedings{pmlr-v139-biedenkapp21a,\n title = \t {TempoRL: Learning When to Act},\n author = {Biedenkapp, Andr{\\'e} and Rajan, Raghu and Hutter, Frank and Lindauer, Marius},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {914--924},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/biedenkapp21a/biedenkapp21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/biedenkapp21a.html},\n abstract = \t {Reinforcement learning is a powerful approach to learn behaviour through interactions with an environment. However, behaviours are usually learned in a purely reactive fashion, where an appropriate action is selected based on an observation. In this form, it is challenging to learn when it is necessary to execute new decisions. 
This makes learning inefficient especially in environments that need various degrees of fine and coarse control. To address this, we propose a proactive setting in which the agent not only selects an action in a state but also for how long to commit to that action. Our TempoRL approach introduces skip connections between states and learns a skip-policy for repeating the same action along these skips. We demonstrate the effectiveness of TempoRL on a variety of traditional and deep RL environments, showing that our approach is capable of learning successful policies up to an order of magnitude faster than vanilla Q-learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/biedenkapp21a/biedenkapp21a.pdf", "supp": "", "pdf_size": 2924632, "gs_citation": 44, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16276824665719650733&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Department of Computer Science, University of Freiburg, Germany+BCAI, Renningen, Germany; Department of Computer Science, University of Freiburg, Germany+BCAI, Renningen, Germany; BCAI, Renningen, Germany; Information Processing Institute (tnt), Leibniz University Hannover, Germany", "aff_domain": "cs.uni-freiburg.de; ; ; ", "email": "cs.uni-freiburg.de; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/biedenkapp21a.html", "aff_unique_index": "0+1;0+1;1;2", "aff_unique_norm": "University of Freiburg;BCAI;Leibniz University Hannover", "aff_unique_dep": "Department of Computer Science;;Information Processing Institute", "aff_unique_url": "https://www.uni-freiburg.de;;https://www.leibniz-university-hannover.de", "aff_unique_abbr": ";;", "aff_campus_unique_index": "1;1;1", "aff_campus_unique": ";Renningen", "aff_country_unique_index": "0+0;0+0;0;0", "aff_country_unique": "Germany" }, { "title": "Temporal Difference Learning as Gradient Splitting", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10519", "id": "10519", "proceeding": "http://proceedings.mlr.press/v139/liu21q.html", "slides": "/media/icml-2021/Slides/10519.pdf", "author_site": "Rui Liu, Alex Olshevsky", "author": "Rui Liu; Alex Olshevsky", "abstract": "Temporal difference learning with linear function approximation is a popular method to obtain a low-dimensional approximation of the value function of a policy in a Markov Decision Process. We provide an interpretation of this method in terms of a splitting of the gradient of an appropriately chosen function. As a consequence of this interpretation, convergence proofs for gradient descent can be applied almost verbatim to temporal difference learning. Beyond giving a fuller explanation of why temporal difference works, this interpretation also yields improved convergence times. We consider the setting with $1/\\sqrt{T}$ step-size, where previous comparable finite-time convergence time bounds for temporal difference learning had the multiplicative factor $1/(1-\\gamma)$ in front of the bound, with $\\gamma$ being the discount factor. 
We show that a minor variation on TD learning which estimates the mean of the value function separately has a convergence time where $1/(1-\\gamma)$ only multiplies an asymptotically negligible term.", "bibtex": "@InProceedings{pmlr-v139-liu21q,\n title = \t {Temporal Difference Learning as Gradient Splitting},\n author = {Liu, Rui and Olshevsky, Alex},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6905--6913},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21q/liu21q.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21q.html},\n abstract = \t {Temporal difference learning with linear function approximation is a popular method to obtain a low-dimensional approximation of the value function of a policy in a Markov Decision Process. We provide an interpretation of this method in terms of a splitting of the gradient of an appropriately chosen function. As a consequence of this interpretation, convergence proofs for gradient descent can be applied almost verbatim to temporal difference learning. Beyond giving a fuller explanation of why temporal difference works, this interpretation also yields improved convergence times. We consider the setting with $1/\\sqrt{T}$ step-size, where previous comparable finite-time convergence time bounds for temporal difference learning had the multiplicative factor $1/(1-\\gamma)$ in front of the bound, with $\\gamma$ being the discount factor. We show that a minor variation on TD learning which estimates the mean of the value function separately has a convergence time where $1/(1-\\gamma)$ only multiplies an asymptotically negligible term.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21q/liu21q.pdf", "supp": "", "pdf_size": 190982, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12720183483990253425&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 6, "aff": "Division of Systems Engineering, Boston University, Boston, MA, USA; Department of ECE and Division of Systems Engineering, Boston University, Boston, MA, USA", "aff_domain": "bu.edu; ", "email": "bu.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/liu21q.html", "aff_unique_index": "0;0", "aff_unique_norm": "Boston University", "aff_unique_dep": "Division of Systems Engineering", "aff_unique_url": "https://www.bu.edu", "aff_unique_abbr": "BU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Boston", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Temporal Predictive Coding For Model-Based Planning In Latent Space", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9447", "id": "9447", "proceeding": "http://proceedings.mlr.press/v139/nguyen21h.html", "slides": "/media/icml-2021/Slides/9447.pdf", "author_site": "Tung Nguyen, Rui Shu, Tuan Pham, Hung Bui, Stefano Ermon", "author": "Tung D Nguyen; Rui Shu; Tuan Pham; Hung Bui; Stefano Ermon", "abstract": "High-dimensional observations are a major challenge in the application of model-based reinforcement learning (MBRL) to real-world environments. 
To handle high-dimensional sensory inputs, existing approaches use representation learning to map high-dimensional observations into a lower-dimensional latent space that is more amenable to dynamics estimation and planning. In this work, we present an information-theoretic approach that employs temporal predictive coding to encode elements in the environment that can be predicted across time. Since this approach focuses on encoding temporally-predictable information, we implicitly prioritize the encoding of task-relevant components over nuisance information within the environment that are provably task-irrelevant. By learning this representation in conjunction with a recurrent state space model, we can then perform planning in latent space. We evaluate our model on a challenging modification of standard DMControl tasks where the background is replaced with natural videos that contain complex but irrelevant information to the planning task. Our experiments show that our model is superior to existing methods in the challenging complex-background setting while remaining competitive with current state-of-the-art models in the standard setting.", "bibtex": "@InProceedings{pmlr-v139-nguyen21h,\n title = \t {Temporal Predictive Coding For Model-Based Planning In Latent Space},\n author = {Nguyen, Tung D and Shu, Rui and Pham, Tuan and Bui, Hung and Ermon, Stefano},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8130--8139},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nguyen21h/nguyen21h.pdf},\n url = \t {https://proceedings.mlr.press/v139/nguyen21h.html},\n abstract = \t {High-dimensional observations are a major challenge in the application of model-based reinforcement learning (MBRL) to real-world environments. To handle high-dimensional sensory inputs, existing approaches use representation learning to map high-dimensional observations into a lower-dimensional latent space that is more amenable to dynamics estimation and planning. In this work, we present an information-theoretic approach that employs temporal predictive coding to encode elements in the environment that can be predicted across time. Since this approach focuses on encoding temporally-predictable information, we implicitly prioritize the encoding of task-relevant components over nuisance information within the environment that are provably task-irrelevant. By learning this representation in conjunction with a recurrent state space model, we can then perform planning in latent space. We evaluate our model on a challenging modification of standard DMControl tasks where the background is replaced with natural videos that contain complex but irrelevant information to the planning task. 
Our experiments show that our model is superior to existing methods in the challenging complex-background setting while remaining competitive with current state-of-the-art models in the standard setting.}\n}", "pdf": "http://proceedings.mlr.press/v139/nguyen21h/nguyen21h.pdf", "supp": "", "pdf_size": 2002522, "gs_citation": 61, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3538175224017853269&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "VinAI Research+Stanford University; Stanford University; VinAI Research; VinAI Research; Stanford University", "aff_domain": "vinai.io;stanford.edu; ; ; ", "email": "vinai.io;stanford.edu; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/nguyen21h.html", "aff_unique_index": "0+1;1;0;0;1", "aff_unique_norm": "VinAI Research;Stanford University", "aff_unique_dep": ";", "aff_unique_url": "https://www.vinai.io/;https://www.stanford.edu", "aff_unique_abbr": "VinAI;Stanford", "aff_campus_unique_index": "1;1;1", "aff_campus_unique": ";Stanford", "aff_country_unique_index": "0+1;1;0;0;1", "aff_country_unique": "Vietnam;United States" }, { "title": "Temporally Correlated Task Scheduling for Sequence Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9449", "id": "9449", "proceeding": "http://proceedings.mlr.press/v139/wu21e.html", "slides": "", "author_site": "Xueqing Wu, Lewen Wang, Yingce Xia, Weiqing Liu, Lijun Wu, Shufang Xie, Tao Qin, Tie-Yan Liu", "author": "Xueqing Wu; Lewen Wang; Yingce Xia; Weiqing Liu; Lijun Wu; Shufang Xie; Tao Qin; Tie-Yan Liu", "abstract": "Sequence learning has attracted much research attention from the machine learning community in recent years. In many applications, a sequence learning task is usually associated with multiple temporally correlated auxiliary tasks, which are different in terms of how much input information to use or which future step to predict. For example, (i) in simultaneous machine translation, one can conduct translation under different latency (i.e., how many input words to read/wait before translation); (ii) in stock trend forecasting, one can predict the price of a stock in different future days (e.g., tomorrow, the day after tomorrow). While it is clear that those temporally correlated tasks can help each other, there is a very limited exploration on how to better leverage multiple auxiliary tasks to boost the performance of the main task. In this work, we introduce a learnable scheduler to sequence learning, which can adaptively select auxiliary tasks for training depending on the model status and the current training data. The scheduler and the model for the main task are jointly trained through bi-level optimization. 
Experiments show that our method significantly improves the performance of simultaneous machine translation and stock trend forecasting.", "bibtex": "@InProceedings{pmlr-v139-wu21e,\n title = \t {Temporally Correlated Task Scheduling for Sequence Learning},\n author = {Wu, Xueqing and Wang, Lewen and Xia, Yingce and Liu, Weiqing and Wu, Lijun and Xie, Shufang and Qin, Tao and Liu, Tie-Yan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11274--11284},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wu21e/wu21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/wu21e.html},\n abstract = \t {Sequence learning has attracted much research attention from the machine learning community in recent years. In many applications, a sequence learning task is usually associated with multiple temporally correlated auxiliary tasks, which are different in terms of how much input information to use or which future step to predict. For example, (i) in simultaneous machine translation, one can conduct translation under different latency (i.e., how many input words to read/wait before translation); (ii) in stock trend forecasting, one can predict the price of a stock in different future days (e.g., tomorrow, the day after tomorrow). While it is clear that those temporally correlated tasks can help each other, there is a very limited exploration on how to better leverage multiple auxiliary tasks to boost the performance of the main task. In this work, we introduce a learnable scheduler to sequence learning, which can adaptively select auxiliary tasks for training depending on the model status and the current training data. The scheduler and the model for the main task are jointly trained through bi-level optimization. 
Experiments show that our method significantly improves the performance of simultaneous machine translation and stock trend forecasting.}\n}", "pdf": "http://proceedings.mlr.press/v139/wu21e/wu21e.pdf", "supp": "", "pdf_size": 900989, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5073529240621763307&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "University of Science and Technology of China; Microsoft Research; Microsoft Research; Microsoft Research; Microsoft Research; Microsoft Research; Microsoft Research; Microsoft Research", "aff_domain": "microsoft.com; ; ; ; ; ; ; ", "email": "microsoft.com; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/wu21e.html", "aff_unique_index": "0;1;1;1;1;1;1;1", "aff_unique_norm": "University of Science and Technology of China;Microsoft", "aff_unique_dep": ";Microsoft Research", "aff_unique_url": "http://www.ustc.edu.cn;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "USTC;MSR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;1;1;1;1;1;1", "aff_country_unique": "China;United States" }, { "title": "Tensor Programs IIb: Architectural Universality Of Neural Tangent Kernel Training Dynamics", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10765", "id": "10765", "proceeding": "http://proceedings.mlr.press/v139/yang21f.html", "slides": "", "author_site": "Greg Yang, Etai Littwin", "author": "Greg Yang; Etai Littwin", "abstract": "Yang (2020) recently showed that the Neural Tangent Kernel (NTK) at initialization has an infinite-width limit for a large class of architectures including modern staples such as ResNet and Transformers. However, their analysis does not apply to training. Here, we show the same neural networks (in the so-called NTK parametrization) during training follow a kernel gradient descent dynamics in function space, where the kernel is the infinite-width NTK. This completes the proof of the architectural universality of NTK behavior. To achieve this result, we apply the Tensor Programs technique: Write the entire SGD dynamics inside a Tensor Program and analyze it via the Master Theorem. To facilitate this proof, we develop a graphical notation for Tensor Programs, which we believe is also an important contribution toward the pedagogy and exposition of the Tensor Programs technique.", "bibtex": "@InProceedings{pmlr-v139-yang21f,\n title = \t {Tensor Programs IIb: Architectural Universality Of Neural Tangent Kernel Training Dynamics},\n author = {Yang, Greg and Littwin, Etai},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11762--11772},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yang21f/yang21f.pdf},\n url = \t {https://proceedings.mlr.press/v139/yang21f.html},\n abstract = \t {Yang (2020) recently showed that the Neural Tangent Kernel (NTK) at initialization has an infinite-width limit for a large class of architectures including modern staples such as ResNet and Transformers. However, their analysis does not apply to training. 
Here, we show the same neural networks (in the so-called NTK parametrization) during training follow a kernel gradient descent dynamics in function space, where the kernel is the infinite-width NTK. This completes the proof of the architectural universality of NTK behavior. To achieve this result, we apply the Tensor Programs technique: Write the entire SGD dynamics inside a Tensor Program and analyze it via the Master Theorem. To facilitate this proof, we develop a graphical notation for Tensor Programs, which we believe is also an important contribution toward the pedagogy and exposition of the Tensor Programs technique.}\n}", "pdf": "http://proceedings.mlr.press/v139/yang21f/yang21f.pdf", "supp": "", "pdf_size": 1256834, "gs_citation": 76, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2811840238345034061&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Microsoft Research; Apple Research", "aff_domain": "microsoft.com;apple.com", "email": "microsoft.com;apple.com", "github": "", "project": "arxiv.org/abs/2105.03703", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/yang21f.html", "aff_unique_index": "0;1", "aff_unique_norm": "Microsoft;Apple", "aff_unique_dep": "Microsoft Research;Apple Research", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://www.apple.com/research/", "aff_unique_abbr": "MSR;Apple", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Tensor Programs IV: Feature Learning in Infinite-Width Neural Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10689", "id": "10689", "proceeding": "http://proceedings.mlr.press/v139/yang21c.html", "slides": "", "author_site": "Greg Yang, Edward Hu", "author": "Greg Yang; Edward J. Hu", "abstract": "As its width tends to infinity, a deep neural network\u2019s behavior under gradient descent can become simplified and predictable (e.g. given by the Neural Tangent Kernel (NTK)), if it is parametrized appropriately (e.g. the NTK parametrization). However, we show that the standard and NTK parametrizations of a neural network do not admit infinite-width limits that can *learn* features, which is crucial for pretraining and transfer learning such as with BERT. We propose simple modifications to the standard parametrization to allow for feature learning in the limit. Using the *Tensor Programs* technique, we derive explicit formulas for such limits. On Word2Vec and few-shot learning on Omniglot via MAML, two canonical tasks that rely crucially on feature learning, we compute these limits exactly. 
We find that they outperform both NTK baselines and finite-width networks, with the latter approaching the infinite-width feature learning performance as width increases.", "bibtex": "@InProceedings{pmlr-v139-yang21c,\n title = \t {Tensor Programs IV: Feature Learning in Infinite-Width Neural Networks},\n author = {Yang, Greg and Hu, Edward J.},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11727--11737},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yang21c/yang21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/yang21c.html},\n abstract = \t {As its width tends to infinity, a deep neural network\u2019s behavior under gradient descent can become simplified and predictable (e.g. given by the Neural Tangent Kernel (NTK)), if it is parametrized appropriately (e.g. the NTK parametrization). However, we show that the standard and NTK parametrizations of a neural network do not admit infinite-width limits that can *learn* features, which is crucial for pretraining and transfer learning such as with BERT. We propose simple modifications to the standard parametrization to allow for feature learning in the limit. Using the *Tensor Programs* technique, we derive explicit formulas for such limits. On Word2Vec and few-shot learning on Omniglot via MAML, two canonical tasks that rely crucially on feature learning, we compute these limits exactly. We find that they outperform both NTK baselines and finite-width networks, with the latter approaching the infinite-width feature learning performance as width increases.}\n}", "pdf": "http://proceedings.mlr.press/v139/yang21c/yang21c.pdf", "supp": "", "pdf_size": 1111531, "gs_citation": 261, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=971634146089626804&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 3, "aff": "Microsoft Research AI; Microsoft Dynamics 365 AI + Microsoft AI Residency Program", "aff_domain": "microsoft.com; ", "email": "microsoft.com; ", "github": "", "project": "arXiv:2011.14522", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/yang21c.html", "aff_unique_index": "0;0+0", "aff_unique_norm": "Microsoft", "aff_unique_dep": "AI", "aff_unique_url": "https://www.microsoft.com/en-us/research", "aff_unique_abbr": "MSR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0+0", "aff_country_unique": "United States" }, { "title": "TeraPipe: Token-Level Pipeline Parallelism for Training Large-Scale Language Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9181", "id": "9181", "proceeding": "http://proceedings.mlr.press/v139/li21y.html", "slides": "", "author_site": "Zhuohan Li, Siyuan Zhuang, Shiyuan Guo, Danyang Zhuo, Hao Zhang, Dawn Song, Ion Stoica", "author": "Zhuohan Li; Siyuan Zhuang; Shiyuan Guo; Danyang Zhuo; Hao Zhang; Dawn Song; Ion Stoica", "abstract": "Model parallelism has become a necessity for training modern large-scale deep language models. In this work, we identify a new and orthogonal dimension from existing model parallel approaches: it is possible to perform pipeline parallelism within a single training sequence for Transformer-based language models thanks to its autoregressive property. This enables a more fine-grained pipeline compared with previous work. 
With this key idea, we design TeraPipe, a high-performance token-level pipeline parallel algorithm for synchronous model-parallel training of Transformer-based language models. We develop a novel dynamic programming-based algorithm to calculate the optimal pipelining execution scheme given a specific model and cluster configuration. We show that TeraPipe can speed up the training by 5.0x for the largest GPT-3 model with 175 billion parameters on an AWS cluster with 48 p3.16xlarge instances compared with state-of-the-art model-parallel methods. The code for reproduction can be found at https://github.com/zhuohan123/terapipe", "bibtex": "@InProceedings{pmlr-v139-li21y,\n title = \t {TeraPipe: Token-Level Pipeline Parallelism for Training Large-Scale Language Models},\n author = {Li, Zhuohan and Zhuang, Siyuan and Guo, Shiyuan and Zhuo, Danyang and Zhang, Hao and Song, Dawn and Stoica, Ion},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6543--6552},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21y/li21y.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21y.html},\n abstract = \t {Model parallelism has become a necessity for training modern large-scale deep language models. In this work, we identify a new and orthogonal dimension from existing model parallel approaches: it is possible to perform pipeline parallelism within a single training sequence for Transformer-based language models thanks to its autoregressive property. This enables a more fine-grained pipeline compared with previous work. With this key idea, we design TeraPipe, a high-performance token-level pipeline parallel algorithm for synchronous model-parallel training of Transformer-based language models. We develop a novel dynamic programming-based algorithm to calculate the optimal pipelining execution scheme given a specific model and cluster configuration. We show that TeraPipe can speed up the training by 5.0x for the largest GPT-3 model with 175 billion parameters on an AWS cluster with 48 p3.16xlarge instances compared with state-of-the-art model-parallel methods. 
The code for reproduction can be found at https://github.com/zhuohan123/terapipe}\n}", "pdf": "http://proceedings.mlr.press/v139/li21y/li21y.pdf", "supp": "", "pdf_size": 3639569, "gs_citation": 128, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9109745061137409325&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "UC Berkeley; UC Berkeley; UC Berkeley; Duke University; UC Berkeley; UC Berkeley; UC Berkeley", "aff_domain": "cs.berkeley.edu; ; ; ; ; ; ", "email": "cs.berkeley.edu; ; ; ; ; ; ", "github": "https://github.com/zhuohan123/terapipe", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/li21y.html", "aff_unique_index": "0;0;0;1;0;0;0", "aff_unique_norm": "University of California, Berkeley;Duke University", "aff_unique_dep": ";", "aff_unique_url": "https://www.berkeley.edu;https://www.duke.edu", "aff_unique_abbr": "UC Berkeley;Duke", "aff_campus_unique_index": "0;0;0;0;0;0", "aff_campus_unique": "Berkeley;", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Tesseract: Tensorised Actors for Multi-Agent Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8723", "id": "8723", "proceeding": "http://proceedings.mlr.press/v139/mahajan21a.html", "slides": "/media/icml-2021/Slides/8723.pdf", "author_site": "Anuj Mahajan, Mikayel Samvelyan, Lei Mao, Viktor Makoviychuk, Animesh Garg, Jean Kossaifi, Shimon Whiteson, Yuke Zhu, Anima Anandkumar", "author": "Anuj Mahajan; Mikayel Samvelyan; Lei Mao; Viktor Makoviychuk; Animesh Garg; Jean Kossaifi; Shimon Whiteson; Yuke Zhu; Animashree Anandkumar", "abstract": "Reinforcement Learning in large action spaces is a challenging problem. This is especially true for cooperative multi-agent reinforcement learning (MARL), which often requires tractable learning while respecting various constraints like communication budget and information about other agents. In this work, we focus on the fundamental hurdle affecting both value-based and policy-gradient approaches: an exponential blowup of the action space with the number of agents. For value-based methods, it poses challenges in accurately representing the optimal value function for value-based methods, thus inducing suboptimality. For policy gradient methods, it renders the critic ineffective and exacerbates the problem of the lagging critic. We show that from a learning theory perspective, both problems can be addressed by accurately representing the associated action-value function with a low-complexity hypothesis class. This requires accurately modelling the agent interactions in a sample efficient way. To this end, we propose a novel tensorised formulation of the Bellman equation. This gives rise to our method Tesseract, which utilises the view of Q-function seen as a tensor where the modes correspond to action spaces of different agents. Algorithms derived from Tesseract decompose the Q-tensor across the agents and utilise low-rank tensor approximations to model the agent interactions relevant to the task. We provide PAC analysis for Tesseract based algorithms and highlight their relevance to the class of rich observation MDPs. 
Empirical results in different domains confirm the gains in sample efficiency using Tesseract as supported by the theory.", "bibtex": "@InProceedings{pmlr-v139-mahajan21a,\n title = \t {Tesseract: Tensorised Actors for Multi-Agent Reinforcement Learning},\n author = {Mahajan, Anuj and Samvelyan, Mikayel and Mao, Lei and Makoviychuk, Viktor and Garg, Animesh and Kossaifi, Jean and Whiteson, Shimon and Zhu, Yuke and Anandkumar, Animashree},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7301--7312},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/mahajan21a/mahajan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/mahajan21a.html},\n abstract = \t {Reinforcement Learning in large action spaces is a challenging problem. This is especially true for cooperative multi-agent reinforcement learning (MARL), which often requires tractable learning while respecting various constraints like communication budget and information about other agents. In this work, we focus on the fundamental hurdle affecting both value-based and policy-gradient approaches: an exponential blowup of the action space with the number of agents. For value-based methods, it poses challenges in accurately representing the optimal value function for value-based methods, thus inducing suboptimality. For policy gradient methods, it renders the critic ineffective and exacerbates the problem of the lagging critic. We show that from a learning theory perspective, both problems can be addressed by accurately representing the associated action-value function with a low-complexity hypothesis class. This requires accurately modelling the agent interactions in a sample efficient way. To this end, we propose a novel tensorised formulation of the Bellman equation. This gives rise to our method Tesseract, which utilises the view of Q-function seen as a tensor where the modes correspond to action spaces of different agents. Algorithms derived from Tesseract decompose the Q-tensor across the agents and utilise low-rank tensor approximations to model the agent interactions relevant to the task. We provide PAC analysis for Tesseract based algorithms and highlight their relevance to the class of rich observation MDPs. 
Empirical results in different domains confirm the gains in sample efficiency using Tesseract as supported by the theory.}\n}", "pdf": "http://proceedings.mlr.press/v139/mahajan21a/mahajan21a.pdf", "supp": "", "pdf_size": 851851, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16845867820442761597&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "University of Oxford; University College London; NVIDIA; NVIDIA; NVIDIA; NVIDIA; University of Oxford; NVIDIA; NVIDIA", "aff_domain": "cs.ox.ac.uk; ; ; ; ; ;cs.ox.ac.uk; ; ", "email": "cs.ox.ac.uk; ; ; ; ; ;cs.ox.ac.uk; ; ", "github": "", "project": "", "author_num": 9, "oa": "https://proceedings.mlr.press/v139/mahajan21a.html", "aff_unique_index": "0;1;2;2;2;2;0;2;2", "aff_unique_norm": "University of Oxford;University College London;NVIDIA", "aff_unique_dep": ";;NVIDIA Corporation", "aff_unique_url": "https://www.ox.ac.uk;https://www.ucl.ac.uk;https://www.nvidia.com", "aff_unique_abbr": "Oxford;UCL;NVIDIA", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;1;1;1;1;0;1;1", "aff_country_unique": "United Kingdom;United States" }, { "title": "Testing DNN-based Autonomous Driving Systems under Critical Environmental Conditions", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9405", "id": "9405", "proceeding": "http://proceedings.mlr.press/v139/li21r.html", "slides": "", "author_site": "Zhong Li, Minxue Pan, Tian Zhang, Xuandong Li", "author": "Zhong Li; Minxue Pan; Tian Zhang; Xuandong Li", "abstract": "Due to the increasing usage of Deep Neural Network (DNN) based autonomous driving systems (ADS) where erroneous or unexpected behaviours can lead to catastrophic accidents, testing such systems is of growing importance. Existing approaches often just focus on finding erroneous behaviours and have not thoroughly studied the impact of environmental conditions. In this paper, we propose to test DNN-based ADS under different environmental conditions to identify the critical ones, that is, the environmental conditions under which the ADS are more prone to errors. To tackle the problem of the space of environmental conditions being extremely large, we present a novel approach named TACTIC that employs the search-based method to identify critical environmental conditions generated by an image-to-image translation model. Large-scale experiments show that TACTIC can effectively identify critical environmental conditions and produce realistic testing images, and meanwhile, reveal more erroneous behaviours compared to existing approaches.", "bibtex": "@InProceedings{pmlr-v139-li21r,\n title = \t {Testing DNN-based Autonomous Driving Systems under Critical Environmental Conditions},\n author = {Li, Zhong and Pan, Minxue and Zhang, Tian and Li, Xuandong},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6471--6482},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21r/li21r.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21r.html},\n abstract = \t {Due to the increasing usage of Deep Neural Network (DNN) based autonomous driving systems (ADS) where erroneous or unexpected behaviours can lead to catastrophic accidents, testing such systems is of growing importance. 
Existing approaches often just focus on finding erroneous behaviours and have not thoroughly studied the impact of environmental conditions. In this paper, we propose to test DNN-based ADS under different environmental conditions to identify the critical ones, that is, the environmental conditions under which the ADS are more prone to errors. To tackle the problem of the space of environmental conditions being extremely large, we present a novel approach named TACTIC that employs the search-based method to identify critical environmental conditions generated by an image-to-image translation model. Large-scale experiments show that TACTIC can effectively identify critical environmental conditions and produce realistic testing images, and meanwhile, reveal more erroneous behaviours compared to existing approaches.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21r/li21r.pdf", "supp": "", "pdf_size": 2348093, "gs_citation": 47, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2313619425438894441&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China+Department of Computer Science and Technology, Nanjing University, Nanjing, China; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China+Software Institute, Nanjing University, Nanjing, China; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China+Department of Computer Science and Technology, Nanjing University, Nanjing, China; State Key Laboratory for Novel Software Technology, Nanjing University, Nanjing, China+Department of Computer Science and Technology, Nanjing University, Nanjing, China", "aff_domain": "nju.edu.cn; ; ;", "email": "nju.edu.cn; ; ;", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/li21r.html", "aff_unique_index": "0+0;0+0;0+0;0+0", "aff_unique_norm": "Nanjing University", "aff_unique_dep": "State Key Laboratory for Novel Software Technology", "aff_unique_url": "http://www.nju.edu.cn", "aff_unique_abbr": "NU", "aff_campus_unique_index": "0+0;0+0;0+0;0+0", "aff_campus_unique": "Nanjing", "aff_country_unique_index": "0+0;0+0;0+0;0+0", "aff_country_unique": "China" }, { "title": "Testing Group Fairness via Optimal Transport Projections", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10251", "id": "10251", "proceeding": "http://proceedings.mlr.press/v139/si21a.html", "slides": "", "author_site": "Nian Si, Karthyek Murthy, Jose Blanchet, Viet Anh Nguyen", "author": "Nian Si; Karthyek Murthy; Jose Blanchet; Viet Anh Nguyen", "abstract": "We have developed a statistical testing framework to detect if a given machine learning classifier fails to satisfy a wide range of group fairness notions. Our test is a flexible, interpretable, and statistically rigorous tool for auditing whether exhibited biases are intrinsic to the algorithm or simply due to the randomness in the data. The statistical challenges, which may arise from multiple impact criteria that define group fairness and which are discontinuous on model parameters, are conveniently tackled by projecting the empirical measure to the set of group-fair probability models using optimal transport. This statistic is efficiently computed using linear programming, and its asymptotic distribution is explicitly obtained. 
The proposed framework can also be used to test for composite fairness hypotheses and fairness with multiple sensitive attributes. The optimal transport testing formulation improves interpretability by characterizing the minimal covariate perturbations that eliminate the bias observed in the audit.", "bibtex": "@InProceedings{pmlr-v139-si21a,\n title = \t {Testing Group Fairness via Optimal Transport Projections},\n author = {Si, Nian and Murthy, Karthyek and Blanchet, Jose and Nguyen, Viet Anh},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9649--9659},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/si21a/si21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/si21a.html},\n abstract = \t {We have developed a statistical testing framework to detect if a given machine learning classifier fails to satisfy a wide range of group fairness notions. Our test is a flexible, interpretable, and statistically rigorous tool for auditing whether exhibited biases are intrinsic to the algorithm or simply due to the randomness in the data. The statistical challenges, which may arise from multiple impact criteria that define group fairness and which are discontinuous on model parameters, are conveniently tackled by projecting the empirical measure to the set of group-fair probability models using optimal transport. This statistic is efficiently computed using linear programming, and its asymptotic distribution is explicitly obtained. The proposed framework can also be used to test for composite fairness hypotheses and fairness with multiple sensitive attributes. 
The optimal transport testing formulation improves interpretability by characterizing the minimal covariate perturbations that eliminate the bias observed in the audit.}\n}", "pdf": "http://proceedings.mlr.press/v139/si21a/si21a.pdf", "supp": "", "pdf_size": 508391, "gs_citation": 35, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15803205820405123367&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Management Science & Engineering, Stanford University; Engineering Systems and Design pillar, Singapore University of Technology and Design; Department of Management Science & Engineering, Stanford University; VinAI Research, Vietnam", "aff_domain": "stanford.edu; ; ; ", "email": "stanford.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/si21a.html", "aff_unique_index": "0;1;0;2", "aff_unique_norm": "Stanford University;Singapore University of Technology and Design;VinAI Research", "aff_unique_dep": "Department of Management Science & Engineering;Engineering Systems and Design pillar;", "aff_unique_url": "https://www.stanford.edu;https://www.sutd.edu.sg;https://www.vin.ai", "aff_unique_abbr": "Stanford;SUTD;VinAI", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Stanford;", "aff_country_unique_index": "0;1;0;2", "aff_country_unique": "United States;Singapore;Vietnam" }, { "title": "The Distributed Discrete Gaussian Mechanism for Federated Learning with Secure Aggregation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9805", "id": "9805", "proceeding": "http://proceedings.mlr.press/v139/kairouz21a.html", "slides": "", "author_site": "Peter Kairouz, Ziyu Liu, Thomas Steinke", "author": "Peter Kairouz; Ziyu Liu; Thomas Steinke", "abstract": "We consider training models on private data that are distributed across user devices. To ensure privacy, we add on-device noise and use secure aggregation so that only the noisy sum is revealed to the server. We present a comprehensive end-to-end system, which appropriately discretizes the data and adds discrete Gaussian noise before performing secure aggregation. We provide a novel privacy analysis for sums of discrete Gaussians and carefully analyze the effects of data quantization and modular summation arithmetic. Our theoretical guarantees highlight the complex tension between communication, privacy, and accuracy. Our extensive experimental results demonstrate that our solution is essentially able to match the accuracy to central differential privacy with less than 16 bits of precision per value.", "bibtex": "@InProceedings{pmlr-v139-kairouz21a,\n title = \t {The Distributed Discrete Gaussian Mechanism for Federated Learning with Secure Aggregation},\n author = {Kairouz, Peter and Liu, Ziyu and Steinke, Thomas},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5201--5212},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kairouz21a/kairouz21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kairouz21a.html},\n abstract = \t {We consider training models on private data that are distributed across user devices. To ensure privacy, we add on-device noise and use secure aggregation so that only the noisy sum is revealed to the server. 
We present a comprehensive end-to-end system, which appropriately discretizes the data and adds discrete Gaussian noise before performing secure aggregation. We provide a novel privacy analysis for sums of discrete Gaussians and carefully analyze the effects of data quantization and modular summation arithmetic. Our theoretical guarantees highlight the complex tension between communication, privacy, and accuracy. Our extensive experimental results demonstrate that our solution is essentially able to match the accuracy to central differential privacy with less than 16 bits of precision per value.}\n}", "pdf": "http://proceedings.mlr.press/v139/kairouz21a/kairouz21a.pdf", "supp": "", "pdf_size": 3345454, "gs_citation": 288, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12466254511773747915&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Google Research; Google Research; Google Research", "aff_domain": "google.com;google.com;thomas-steinke.net", "email": "google.com;google.com;thomas-steinke.net", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/kairouz21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google Research", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "The Earth Mover\u2019s Pinball Loss: Quantiles for Histogram-Valued Regression", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9859", "id": "9859", "proceeding": "http://proceedings.mlr.press/v139/list21a.html", "slides": "", "author": "Florian List", "abstract": "Although ubiquitous in the sciences, histogram data have not received much attention by the Deep Learning community. Whilst regression and classification tasks for scalar and vector data are routinely solved by neural networks, a principled approach for estimating histogram labels as a function of an input vector or image is lacking in the literature. We present a dedicated method for Deep Learning-based histogram regression, which incorporates cross-bin information and yields distributions over possible histograms, expressed by $\\tau$-quantiles of the cumulative histogram in each bin. The crux of our approach is a new loss function obtained by applying the pinball loss to the cumulative histogram, which for 1D histograms reduces to the Earth Mover\u2019s distance (EMD) in the special case of the median ($\\tau = 0.5$), and generalizes it to arbitrary quantiles. We validate our method with an illustrative toy example, a football-related task, and an astrophysical computer vision problem. 
We show that with our loss function, the accuracy of the predicted median histograms is very similar to the standard EMD case (and higher than for per-bin loss functions such as cross-entropy), while the predictions become much more informative at almost no additional computational cost.", "bibtex": "@InProceedings{pmlr-v139-list21a,\n title = \t {The Earth Mover\u2019s Pinball Loss: Quantiles for Histogram-Valued Regression},\n author = {List, Florian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6713--6724},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/list21a/list21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/list21a.html},\n abstract = \t {Although ubiquitous in the sciences, histogram data have not received much attention by the Deep Learning community. Whilst regression and classification tasks for scalar and vector data are routinely solved by neural networks, a principled approach for estimating histogram labels as a function of an input vector or image is lacking in the literature. We present a dedicated method for Deep Learning-based histogram regression, which incorporates cross-bin information and yields distributions over possible histograms, expressed by $\\tau$-quantiles of the cumulative histogram in each bin. The crux of our approach is a new loss function obtained by applying the pinball loss to the cumulative histogram, which for 1D histograms reduces to the Earth Mover\u2019s distance (EMD) in the special case of the median ($\\tau = 0.5$), and generalizes it to arbitrary quantiles. We validate our method with an illustrative toy example, a football-related task, and an astrophysical computer vision problem. We show that with our loss function, the accuracy of the predicted median histograms is very similar to the standard EMD case (and higher than for per-bin loss functions such as cross-entropy), while the predictions become much more informative at almost no additional computational cost.}\n}", "pdf": "http://proceedings.mlr.press/v139/list21a/list21a.pdf", "supp": "", "pdf_size": 3239855, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8815411661488065090&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "The University of Sydney, Sydney Institute for Astronomy, School of Physics, A28, NSW 2006, Australia", "aff_domain": "sydney.edu.au", "email": "sydney.edu.au", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v139/list21a.html", "aff_unique_index": "0", "aff_unique_norm": "University of Sydney", "aff_unique_dep": "School of Physics", "aff_unique_url": "https://www.sydney.edu.au", "aff_unique_abbr": "USYD", "aff_campus_unique_index": "0", "aff_campus_unique": "Sydney", "aff_country_unique_index": "0", "aff_country_unique": "Australia" }, { "title": "The Emergence of Individuality", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8933", "id": "8933", "proceeding": "http://proceedings.mlr.press/v139/jiang21g.html", "slides": "", "author_site": "Jiechuan Jiang, Zongqing Lu", "author": "Jiechuan Jiang; Zongqing Lu", "abstract": "Individuality is essential in human society. It induces the division of labor and thus improves the efficiency and productivity. 
Similarly, it should also be a key to multi-agent cooperation. Inspired by that individuality is of being an individual separate from others, we propose a simple yet efficient method for the emergence of individuality (EOI) in multi-agent reinforcement learning (MARL). EOI learns a probabilistic classifier that predicts a probability distribution over agents given their observation and gives each agent an intrinsic reward of being correctly predicted by the classifier. The intrinsic reward encourages the agents to visit their own familiar observations, and learning the classifier by such observations makes the intrinsic reward signals stronger and in turn makes the agents more identifiable. To further enhance the intrinsic reward and promote the emergence of individuality, two regularizers are proposed to increase the discriminability of the classifier. We implement EOI on top of popular MARL algorithms. Empirically, we show that EOI outperforms existing methods in a variety of multi-agent cooperative scenarios.", "bibtex": "@InProceedings{pmlr-v139-jiang21g,\n title = \t {The Emergence of Individuality},\n author = {Jiang, Jiechuan and Lu, Zongqing},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4992--5001},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jiang21g/jiang21g.pdf},\n url = \t {https://proceedings.mlr.press/v139/jiang21g.html},\n abstract = \t {Individuality is essential in human society. It induces the division of labor and thus improves the efficiency and productivity. Similarly, it should also be a key to multi-agent cooperation. Inspired by that individuality is of being an individual separate from others, we propose a simple yet efficient method for the emergence of individuality (EOI) in multi-agent reinforcement learning (MARL). EOI learns a probabilistic classifier that predicts a probability distribution over agents given their observation and gives each agent an intrinsic reward of being correctly predicted by the classifier. The intrinsic reward encourages the agents to visit their own familiar observations, and learning the classifier by such observations makes the intrinsic reward signals stronger and in turn makes the agents more identifiable. To further enhance the intrinsic reward and promote the emergence of individuality, two regularizers are proposed to increase the discriminability of the classifier. We implement EOI on top of popular MARL algorithms. 
Empirically, we show that EOI outperforms existing methods in a variety of multi-agent cooperative scenarios.}\n}", "pdf": "http://proceedings.mlr.press/v139/jiang21g/jiang21g.pdf", "supp": "", "pdf_size": 1944202, "gs_citation": 50, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14317819395835503476&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Peking University; Peking University", "aff_domain": "pku.edu.cn;pku.edu.cn", "email": "pku.edu.cn;pku.edu.cn", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/jiang21g.html", "aff_unique_index": "0;0", "aff_unique_norm": "Peking University", "aff_unique_dep": "", "aff_unique_url": "http://www.pku.edu.cn", "aff_unique_abbr": "Peking U", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "China" }, { "title": "The Heavy-Tail Phenomenon in SGD", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9017", "id": "9017", "proceeding": "http://proceedings.mlr.press/v139/gurbuzbalaban21a.html", "slides": "", "author_site": "Mert Gurbuzbalaban, Umut Simsekli, Lingjiong Zhu", "author": "Mert Gurbuzbalaban; Umut Simsekli; Lingjiong Zhu", "abstract": "In recent years, various notions of capacity and complexity have been proposed for characterizing the generalization properties of stochastic gradient descent (SGD) in deep learning. Some of the popular notions that correlate well with the performance on unseen data are (i) the \u2018flatness\u2019 of the local minimum found by SGD, which is related to the eigenvalues of the Hessian, (ii) the ratio of the stepsize $\\eta$ to the batch-size $b$, which essentially controls the magnitude of the stochastic gradient noise, and (iii) the \u2018tail-index\u2019, which measures the heaviness of the tails of the network weights at convergence. In this paper, we argue that these three seemingly unrelated perspectives for generalization are deeply linked to each other. We claim that depending on the structure of the Hessian of the loss at the minimum, and the choices of the algorithm parameters $\\eta$ and $b$, the SGD iterates will converge to a \\emph{heavy-tailed} stationary distribution. We rigorously prove this claim in the setting of quadratic optimization: we show that even in a simple linear regression problem with independent and identically distributed data whose distribution has finite moments of all order, the iterates can be heavy-tailed with infinite variance. We further characterize the behavior of the tails with respect to algorithm parameters, the dimension, and the curvature. We then translate our results into insights about the behavior of SGD in deep learning. 
We support our theory with experiments conducted on synthetic data, fully connected, and convolutional neural networks.", "bibtex": "@InProceedings{pmlr-v139-gurbuzbalaban21a,\n title = \t {The Heavy-Tail Phenomenon in SGD},\n author = {Gurbuzbalaban, Mert and Simsekli, Umut and Zhu, Lingjiong},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3964--3975},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gurbuzbalaban21a/gurbuzbalaban21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/gurbuzbalaban21a.html},\n abstract = \t {In recent years, various notions of capacity and complexity have been proposed for characterizing the generalization properties of stochastic gradient descent (SGD) in deep learning. Some of the popular notions that correlate well with the performance on unseen data are (i) the \u2018flatness\u2019 of the local minimum found by SGD, which is related to the eigenvalues of the Hessian, (ii) the ratio of the stepsize $\\eta$ to the batch-size $b$, which essentially controls the magnitude of the stochastic gradient noise, and (iii) the \u2018tail-index\u2019, which measures the heaviness of the tails of the network weights at convergence. In this paper, we argue that these three seemingly unrelated perspectives for generalization are deeply linked to each other. We claim that depending on the structure of the Hessian of the loss at the minimum, and the choices of the algorithm parameters $\\eta$ and $b$, the SGD iterates will converge to a \\emph{heavy-tailed} stationary distribution. We rigorously prove this claim in the setting of quadratic optimization: we show that even in a simple linear regression problem with independent and identically distributed data whose distribution has finite moments of all order, the iterates can be heavy-tailed with infinite variance. We further characterize the behavior of the tails with respect to algorithm parameters, the dimension, and the curvature. We then translate our results into insights about the behavior of SGD in deep learning. 
We support our theory with experiments conducted on synthetic data, fully connected, and convolutional neural networks.}\n}", "pdf": "http://proceedings.mlr.press/v139/gurbuzbalaban21a/gurbuzbalaban21a.pdf", "supp": "", "pdf_size": 1407777, "gs_citation": 164, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11485380306468946114&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Department of Management Science and Information Systems, Rutgers Business School, Piscataway, USA; INRIA - D\u00e9partement d\u2019Informatique de l\u2019\u00c9cole Normale Sup\u00e9rieure - PSL Research University, Paris, France; Department of Mathematics, Florida State University, Tallahassee, USA", "aff_domain": "rutgers.edu;inria.fr;math.fsu.edu", "email": "rutgers.edu;inria.fr;math.fsu.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/gurbuzbalaban21a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Rutgers Business School;INRIA;Florida State University", "aff_unique_dep": "Department of Management Science and Information Systems;D\u00e9partement d\u2019Informatique de l\u2019\u00c9cole Normale Sup\u00e9rieure;Department of Mathematics", "aff_unique_url": "https://business.rutgers.edu;https://www.inria.fr;https://www.fsu.edu", "aff_unique_abbr": "RBS;INRIA;FSU", "aff_campus_unique_index": "0;1;2", "aff_campus_unique": "Piscataway;Paris;Tallahassee", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United States;France" }, { "title": "The Hintons in your Neural Network: a Quantum Field Theory View of Deep Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10761", "id": "10761", "proceeding": "http://proceedings.mlr.press/v139/bondesan21a.html", "slides": "/media/icml-2021/Slides/10761.pdf", "author_site": "Roberto Bondesan, Max Welling", "author": "Roberto Bondesan; Max Welling", "abstract": "In this work we develop a quantum field theory formalism for deep learning, where input signals are encoded in Gaussian states, a generalization of Gaussian processes which encode the agent\u2019s uncertainty about the input signal. We show how to represent linear and non-linear layers as unitary quantum gates, and interpret the fundamental excitations of the quantum model as particles, dubbed \u201cHintons\u201d. On top of opening a new perspective and techniques for studying neural networks, the quantum formulation is well suited for optical quantum computing, and provides quantum deformations of neural networks that can be run efficiently on those devices. 
Finally, we discuss a semi-classical limit of the quantum deformed models which is amenable to classical simulation.", "bibtex": "@InProceedings{pmlr-v139-bondesan21a,\n title = \t {The Hintons in your Neural Network: a Quantum Field Theory View of Deep Learning},\n author = {Bondesan, Roberto and Welling, Max},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1038--1048},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bondesan21a/bondesan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bondesan21a.html},\n abstract = \t {In this work we develop a quantum field theory formalism for deep learning, where input signals are encoded in Gaussian states, a generalization of Gaussian processes which encode the agent\u2019s uncertainty about the input signal. We show how to represent linear and non-linear layers as unitary quantum gates, and interpret the fundamental excitations of the quantum model as particles, dubbed \u201cHintons\u201d. On top of opening a new perspective and techniques for studying neural networks, the quantum formulation is well suited for optical quantum computing, and provides quantum deformations of neural networks that can be run efficiently on those devices. Finally, we discuss a semi-classical limit of the quantum deformed models which is amenable to classical simulation.}\n}", "pdf": "http://proceedings.mlr.press/v139/bondesan21a/bondesan21a.pdf", "supp": "", "pdf_size": 622445, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10139465897633603339&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Qualcomm AI Research, Qualcomm Technologies Netherlands B.V .; Qualcomm AI Research, Qualcomm Technologies Netherlands B.V .", "aff_domain": "qti.qualcomm.com; ", "email": "qti.qualcomm.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/bondesan21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Qualcomm Technologies Netherlands B.V.", "aff_unique_dep": "Qualcomm AI Research", "aff_unique_url": "https://www.qualcomm.com/research", "aff_unique_abbr": "QTN", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Netherlands" }, { "title": "The Impact of Record Linkage on Learning from Feature Partitioned Data", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9167", "id": "9167", "proceeding": "http://proceedings.mlr.press/v139/nock21a.html", "slides": "/media/icml-2021/Slides/9167.pdf", "author_site": "Richard Nock, Stephen J Hardy, Wilko Henecka, Hamish Ivey-Law, Jakub Nabaglo, Giorgio Patrini, Guillaume Smith, Brian Thorne", "author": "Richard Nock; Stephen Hardy; Wilko Henecka; Hamish Ivey-Law; Jakub Nabaglo; Giorgio Patrini; Guillaume Smith; Brian Thorne", "abstract": "There has been recently a significant boost to machine learning with distributed data, in particular with the success of federated learning. A common and very challenging setting is that of vertical or feature partitioned data, when multiple data providers hold different features about common entities. In general, training needs to be preceded by record linkage (RL), a step that finds the correspondence between the observations of the datasets. 
RL is prone to mistakes in the real world. Despite the importance of the problem, there has been so far no formal assessment of the way in which RL errors impact learning models. Work in the area either use heuristics or assume that the optimal RL is known in advance. In this paper, we provide the first assessment of the problem for supervised learning. For wide sets of losses, we provide technical conditions under which the classifier learned after noisy RL converges (with the data size) to the best classifier that would be learned from mistake-free RL. This yields new insights on the way the pipeline RL + ML operates, from the role of large margin classification on dampening the impact of RL mistakes to clues on how to further optimize RL as a preprocessing step to ML. Experiments on a large UCI benchmark validate those formal observations.", "bibtex": "@InProceedings{pmlr-v139-nock21a,\n title = \t {The Impact of Record Linkage on Learning from Feature Partitioned Data},\n author = {Nock, Richard and Hardy, Stephen and Henecka, Wilko and Ivey-Law, Hamish and Nabaglo, Jakub and Patrini, Giorgio and Smith, Guillaume and Thorne, Brian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8216--8226},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nock21a/nock21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/nock21a.html},\n abstract = \t {There has been recently a significant boost to machine learning with distributed data, in particular with the success of federated learning. A common and very challenging setting is that of vertical or feature partitioned data, when multiple data providers hold different features about common entities. In general, training needs to be preceded by record linkage (RL), a step that finds the correspondence between the observations of the datasets. RL is prone to mistakes in the real world. Despite the importance of the problem, there has been so far no formal assessment of the way in which RL errors impact learning models. Work in the area either use heuristics or assume that the optimal RL is known in advance. In this paper, we provide the first assessment of the problem for supervised learning. For wide sets of losses, we provide technical conditions under which the classifier learned after noisy RL converges (with the data size) to the best classifier that would be learned from mistake-free RL. This yields new insights on the way the pipeline RL + ML operates, from the role of large margin classification on dampening the impact of RL mistakes to clues on how to further optimize RL as a preprocessing step to ML. 
Experiments on a large UCI benchmark validate those formal observations.}\n}", "pdf": "http://proceedings.mlr.press/v139/nock21a/nock21a.pdf", "supp": "", "pdf_size": 3527768, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14912715237859404143&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Google Research (Brain team); Ambiata; Ambiata; The Australian National University; The Australian National University; Sensity; Ambiata; HardByte", "aff_domain": "google.com; ; ; ; ; ; ; ", "email": "google.com; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/nock21a.html", "aff_unique_index": "0;1;1;2;2;3;1;4", "aff_unique_norm": "Google;Ambiata;Australian National University;Sensity Systems;HardByte", "aff_unique_dep": "Google Research;;;;", "aff_unique_url": "https://research.google;https://www.ambiata.com;https://www.anu.edu.au;https://www.sensitysystems.com;", "aff_unique_abbr": "Google;;ANU;;", "aff_campus_unique_index": "0", "aff_campus_unique": "Mountain View;", "aff_country_unique_index": "0;1;1;1;1;0;1", "aff_country_unique": "United States;Australia;" }, { "title": "The Implicit Bias for Adaptive Optimization Algorithms on Homogeneous Neural Networks", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10219", "id": "10219", "proceeding": "http://proceedings.mlr.press/v139/wang21q.html", "slides": "/media/icml-2021/Slides/10219.pdf", "author_site": "Bohan Wang, Qi Meng, Wei Chen, Tie-Yan Liu", "author": "Bohan Wang; Qi Meng; Wei Chen; Tie-Yan Liu", "abstract": "Despite their overwhelming capacity to overfit, deep neural networks trained by specific optimization algorithms tend to generalize relatively well to unseen data. Recently, researchers explained it by investigating the implicit bias of optimization algorithms. A remarkable progress is the work (Lyu & Li, 2019), which proves gradient descent (GD) maximizes the margin of homogeneous deep neural networks. Except the first-order optimization algorithms like GD, adaptive algorithms such as AdaGrad, RMSProp and Adam are popular owing to their rapid training process. Mean-while, numerous works have provided empirical evidence that adaptive methods may suffer from poor generalization performance. However, theoretical explanation for the generalization of adaptive optimization algorithms is still lacking. In this paper, we study the implicit bias of adaptive optimization algorithms on homogeneous neural networks. In particular, we study the convergent direction of parameters when they are optimizing the logistic loss. We prove that the convergent direction of Adam and RMSProp is the same as GD, while for AdaGrad, the convergent direction depends on the adaptive conditioner. Technically, we provide a unified framework to analyze convergent direction of adaptive optimization algorithms by constructing novel and nontrivial adaptive gradient flow and surrogate margin. The theoretical findings explain the superiority on generalization of exponential moving average strategy that is adopted by RMSProp and Adam. 
To the best of our knowledge, it is the first work to study the convergent direction of adaptive optimizations on non-linear deep neural networks.", "bibtex": "@InProceedings{pmlr-v139-wang21q,\n title = \t {The Implicit Bias for Adaptive Optimization Algorithms on Homogeneous Neural Networks},\n author = {Wang, Bohan and Meng, Qi and Chen, Wei and Liu, Tie-Yan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10849--10858},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21q/wang21q.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21q.html},\n abstract = \t {Despite their overwhelming capacity to overfit, deep neural networks trained by specific optimization algorithms tend to generalize relatively well to unseen data. Recently, researchers explained it by investigating the implicit bias of optimization algorithms. A remarkable progress is the work (Lyu & Li, 2019), which proves gradient descent (GD) maximizes the margin of homogeneous deep neural networks. Except the first-order optimization algorithms like GD, adaptive algorithms such as AdaGrad, RMSProp and Adam are popular owing to their rapid training process. Mean-while, numerous works have provided empirical evidence that adaptive methods may suffer from poor generalization performance. However, theoretical explanation for the generalization of adaptive optimization algorithms is still lacking. In this paper, we study the implicit bias of adaptive optimization algorithms on homogeneous neural networks. In particular, we study the convergent direction of parameters when they are optimizing the logistic loss. We prove that the convergent direction of Adam and RMSProp is the same as GD, while for AdaGrad, the convergent direction depends on the adaptive conditioner. Technically, we provide a unified framework to analyze convergent direction of adaptive optimization algorithms by constructing novel and nontrivial adaptive gradient flow and surrogate margin. The theoretical findings explain the superiority on generalization of exponential moving average strategy that is adopted by RMSProp and Adam. 
To the best of our knowledge, it is the first work to study the convergent direction of adaptive optimizations on non-linear deep neural networks.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21q/wang21q.pdf", "supp": "", "pdf_size": 754690, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6329455504055217085&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Microsoft Research Asia, Beijing, China; Microsoft Research Asia, Beijing, China; Microsoft Research Asia, Beijing, China; Microsoft Research Asia, Beijing, China", "aff_domain": "microsoft.com; ; ; ", "email": "microsoft.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/wang21q.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Microsoft", "aff_unique_dep": "Research", "aff_unique_url": "https://www.microsoft.com/en-us/research/group/asia", "aff_unique_abbr": "MSRA", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Beijing", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "China" }, { "title": "The Limits of Min-Max Optimization Algorithms: Convergence to Spurious Non-Critical Sets", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8633", "id": "8633", "proceeding": "http://proceedings.mlr.press/v139/hsieh21a.html", "slides": "", "author_site": "Ya-Ping Hsieh, Panayotis Mertikopoulos, Volkan Cevher", "author": "Ya-Ping Hsieh; Panayotis Mertikopoulos; Volkan Cevher", "abstract": "Compared to minimization, the min-max optimization in machine learning applications is considerably more convoluted because of the existence of cycles and similar phenomena. Such oscillatory behaviors are well-understood in the convex-concave regime, and many algorithms are known to overcome them. In this paper, we go beyond this basic setting and characterize the convergence properties of many popular methods in solving non-convex/non-concave problems. 
In particular, we show that a wide class of state-of-the-art schemes and heuristics may converge with arbitrarily high probability to attractors that are in no way min-max optimal or even stationary. Our work thus points out a potential pitfall among many existing theoretical frameworks, and we corroborate our theoretical claims by explicitly showcasing spurious attractors in simple two-dimensional problems.}\n}", "pdf": "http://proceedings.mlr.press/v139/hsieh21a/hsieh21a.pdf", "supp": "", "pdf_size": 3225056, "gs_citation": 112, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1373296026612042619&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Department of Computer Science, ETH Zurich, Zurich, Switzerland; Univ. Grenoble Alpes, CNRS, Inria, LIG, Grenoble, France + Criteo AI Lab; Ecole Polytechnique Fed\u00e9rale de Lausanne, Switzerland", "aff_domain": "inf.ethz.ch; ; ", "email": "inf.ethz.ch; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/hsieh21a.html", "aff_unique_index": "0;1+2;3", "aff_unique_norm": "ETH Zurich;Universite Grenoble Alpes;Criteo;Ecole Polytechnique Fed\u00e9rale de Lausanne", "aff_unique_dep": "Department of Computer Science;Laboratoire d'Informatique de Grenoble (LIG);Criteo AI Lab;", "aff_unique_url": "https://www.ethz.ch;https://www.univ-grenoble-alpes.fr;https://www.criteo.com;https://www.epfl.ch", "aff_unique_abbr": "ETHZ;UGA;Criteo;EPFL", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Zurich;Grenoble;", "aff_country_unique_index": "0;1+1;0", "aff_country_unique": "Switzerland;France" }, { "title": "The Lipschitz Constant of Self-Attention", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8677", "id": "8677", "proceeding": "http://proceedings.mlr.press/v139/kim21i.html", "slides": "", "author_site": "Hyunjik Kim, George Papamakarios, Andriy Mnih", "author": "Hyunjik Kim; George Papamakarios; Andriy Mnih", "abstract": "Lipschitz constants of neural networks have been explored in various contexts in deep learning, such as provable adversarial robustness, estimating Wasserstein distance, stabilising training of GANs, and formulating invertible neural networks. Such works have focused on bounding the Lipschitz constant of fully connected or convolutional networks, composed of linear maps and pointwise non-linearities. In this paper, we investigate the Lipschitz constant of self-attention, a non-linear neural network module widely used in sequence modelling. We prove that the standard dot-product self-attention is not Lipschitz for unbounded input domain, and propose an alternative L2 self-attention that is Lipschitz. We derive an upper bound on the Lipschitz constant of L2 self-attention and provide empirical evidence for its asymptotic tightness. 
To demonstrate the practical relevance of our theoretical work, we formulate invertible self-attention and use it in a Transformer-based architecture for a character-level language modelling task.", "bibtex": "@InProceedings{pmlr-v139-kim21i,\n title = \t {The Lipschitz Constant of Self-Attention},\n author = {Kim, Hyunjik and Papamakarios, George and Mnih, Andriy},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5562--5571},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kim21i/kim21i.pdf},\n url = \t {https://proceedings.mlr.press/v139/kim21i.html},\n abstract = \t {Lipschitz constants of neural networks have been explored in various contexts in deep learning, such as provable adversarial robustness, estimating Wasserstein distance, stabilising training of GANs, and formulating invertible neural networks. Such works have focused on bounding the Lipschitz constant of fully connected or convolutional networks, composed of linear maps and pointwise non-linearities. In this paper, we investigate the Lipschitz constant of self-attention, a non-linear neural network module widely used in sequence modelling. We prove that the standard dot-product self-attention is not Lipschitz for unbounded input domain, and propose an alternative L2 self-attention that is Lipschitz. We derive an upper bound on the Lipschitz constant of L2 self-attention and provide empirical evidence for its asymptotic tightness. To demonstrate the practical relevance of our theoretical work, we formulate invertible self-attention and use it in a Transformer-based architecture for a character-level language modelling task.}\n}", "pdf": "http://proceedings.mlr.press/v139/kim21i/kim21i.pdf", "supp": "", "pdf_size": 510799, "gs_citation": 196, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12356022541341785997&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "DeepMind; DeepMind; DeepMind", "aff_domain": "google.com; ; ", "email": "google.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/kim21i.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "The Logical Options Framework", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9379", "id": "9379", "proceeding": "http://proceedings.mlr.press/v139/araki21a.html", "slides": "/media/icml-2021/Slides/9379.pdf", "author_site": "Brandon Araki, Xiao Li, Kiran Vodrahalli, Jonathan DeCastro, Micah Fry, Daniela Rus", "author": "Brandon Araki; Xiao Li; Kiran Vodrahalli; Jonathan Decastro; Micah Fry; Daniela Rus", "abstract": "Learning composable policies for environments with complex rules and tasks is a challenging problem. We introduce a hierarchical reinforcement learning framework called the Logical Options Framework (LOF) that learns policies that are satisfying, optimal, and composable. LOF efficiently learns policies that satisfy tasks by representing the task as an automaton and integrating it into learning and planning. 
We provide and prove conditions under which LOF will learn satisfying, optimal policies. And lastly, we show how LOF\u2019s learned policies can be composed to satisfy unseen tasks with only 10-50 retraining steps on our benchmarks. We evaluate LOF on four tasks in discrete and continuous domains, including a 3D pick-and-place environment.", "bibtex": "@InProceedings{pmlr-v139-araki21a,\n title = \t {The Logical Options Framework},\n author = {Araki, Brandon and Li, Xiao and Vodrahalli, Kiran and Decastro, Jonathan and Fry, Micah and Rus, Daniela},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {307--317},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/araki21a/araki21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/araki21a.html},\n abstract = \t {Learning composable policies for environments with complex rules and tasks is a challenging problem. We introduce a hierarchical reinforcement learning framework called the Logical Options Framework (LOF) that learns policies that are satisfying, optimal, and composable. LOF efficiently learns policies that satisfy tasks by representing the task as an automaton and integrating it into learning and planning. We provide and prove conditions under which LOF will learn satisfying, optimal policies. And lastly, we show how LOF\u2019s learned policies can be composed to satisfy unseen tasks with only 10-50 retraining steps on our benchmarks. We evaluate LOF on four tasks in discrete and continuous domains, including a 3D pick-and-place environment.}\n}", "pdf": "http://proceedings.mlr.press/v139/araki21a/araki21a.pdf", "supp": "", "pdf_size": 1460921, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8491762780532620383&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "CSAIL, Massachusetts Institute of Technology, Cambridge, MA, USA; CSAIL, Massachusetts Institute of Technology, Cambridge, MA, USA; Department of Computer Science, Columbia University, New York City, NY, USA; Toyota Research Institute, Cambridge, MA, USA; MIT Lincoln Laboratory, Lexington, MA, USA; CSAIL, Massachusetts Institute of Technology, Cambridge, MA, USA", "aff_domain": "csail.mit.edu; ; ; ; ; ", "email": "csail.mit.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/araki21a.html", "aff_unique_index": "0;0;1;2;3;0", "aff_unique_norm": "Massachusetts Institute of Technology;Columbia University;Toyota Research Institute;Massachusetts Institute of Technology Lincoln Laboratory", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory (CSAIL);Department of Computer Science;;Lincoln Laboratory", "aff_unique_url": "https://www.csail.mit.edu;https://www.columbia.edu;https://www.tri.global;https://www.ll.mit.edu", "aff_unique_abbr": "MIT;Columbia;TRI;MIT LL", "aff_campus_unique_index": "0;0;1;0;2;0", "aff_campus_unique": "Cambridge;New York City;Lexington", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "The Power of Adaptivity for Stochastic Submodular Cover", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9121", "id": "9121", "proceeding": "http://proceedings.mlr.press/v139/ghuge21a.html", "slides": "", "author_site": "Rohan Ghuge, Anupam 
Gupta, viswanath nagarajan", "author": "Rohan Ghuge; Anupam Gupta; Viswanath Nagarajan", "abstract": "In the stochastic submodular cover problem, the goal is to select a subset of stochastic items of minimum expected cost to cover a submodular function. Solutions in this setting correspond to a sequential decision process that selects items one by one \u201cadaptively\u201d (depending on prior observations). While such adaptive solutions achieve the best objective, the inherently sequential nature makes them undesirable in many applications. We ask: \\emph{how well can solutions with only a few adaptive rounds approximate fully-adaptive solutions?} We consider both cases where the stochastic items are independent, and where they are correlated. For both situations, we obtain nearly tight answers, establishing smooth tradeoffs between the number of adaptive rounds and the solution quality, relative to fully adaptive solutions. Experiments on synthetic and real datasets validate the practical performance of our algorithms, showing qualitative improvements in the solutions as we allow more rounds of adaptivity; in practice, solutions using just a few rounds of adaptivity are nearly as good as fully adaptive solutions.", "bibtex": "@InProceedings{pmlr-v139-ghuge21a,\n title = \t {The Power of Adaptivity for Stochastic Submodular Cover},\n author = {Ghuge, Rohan and Gupta, Anupam and Nagarajan, Viswanath},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3702--3712},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ghuge21a/ghuge21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ghuge21a.html},\n abstract = \t {In the stochastic submodular cover problem, the goal is to select a subset of stochastic items of minimum expected cost to cover a submodular function. Solutions in this setting correspond to a sequential decision process that selects items one by one \u201cadaptively\u201d (depending on prior observations). While such adaptive solutions achieve the best objective, the inherently sequential nature makes them undesirable in many applications. We ask: \\emph{how well can solutions with only a few adaptive rounds approximate fully-adaptive solutions?} We consider both cases where the stochastic items are independent, and where they are correlated. For both situations, we obtain nearly tight answers, establishing smooth tradeoffs between the number of adaptive rounds and the solution quality, relative to fully adaptive solutions. 
Experiments on synthetic and real datasets validate the practical performance of our algorithms, showing qualitative improvements in the solutions as we allow more rounds of adaptivity; in practice, solutions using just a few rounds of adaptivity are nearly as good as fully adaptive solutions.}\n}", "pdf": "http://proceedings.mlr.press/v139/ghuge21a/ghuge21a.pdf", "supp": "", "pdf_size": 2431733, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10145858931877974949&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Department of Industrial and Operations Engineering, University of Michigan, Ann Arbor, USA; Department of Computer Science, Carnegie Mellon University, Pittsburgh, USA; Department of Industrial and Operations Engineering, University of Michigan, Ann Arbor, USA", "aff_domain": "umich.edu; ; ", "email": "umich.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/ghuge21a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Michigan;Carnegie Mellon University", "aff_unique_dep": "Department of Industrial and Operations Engineering;Department of Computer Science", "aff_unique_url": "https://www.umich.edu;https://www.cmu.edu", "aff_unique_abbr": "UM;CMU", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Ann Arbor;Pittsburgh", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "The Power of Log-Sum-Exp: Sequential Density Ratio Matrix Estimation for Speed-Accuracy Optimization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9873", "id": "9873", "proceeding": "http://proceedings.mlr.press/v139/miyagawa21a.html", "slides": "/media/icml-2021/Slides/9873.pdf", "author_site": "Taiki Miyagawa, Akinori Ebihara", "author": "Taiki Miyagawa; Akinori F Ebihara", "abstract": "We propose a model for multiclass classification of time series to make a prediction as early and as accurate as possible. The matrix sequential probability ratio test (MSPRT) is known to be asymptotically optimal for this setting, but contains a critical assumption that hinders broad real-world applications; the MSPRT requires the underlying probability density. To address this problem, we propose to solve density ratio matrix estimation (DRME), a novel type of density ratio estimation that consists of estimating matrices of multiple density ratios with constraints and thus is more challenging than the conventional density ratio estimation. We propose a log-sum-exp-type loss function (LSEL) for solving DRME and prove the following: (i) the LSEL provides the true density ratio matrix as the sample size of the training set increases (consistency); (ii) it assigns larger gradients to harder classes (hard class weighting effect); and (iii) it provides discriminative scores even on class-imbalanced datasets (guess-aversion). Our overall architecture for early classification, MSPRT-TANDEM, statistically significantly outperforms baseline models on four datasets including action recognition, especially in the early stage of sequential observations. 
Our code and datasets are publicly available.", "bibtex": "@InProceedings{pmlr-v139-miyagawa21a,\n title = \t {The Power of Log-Sum-Exp: Sequential Density Ratio Matrix Estimation for Speed-Accuracy Optimization},\n author = {Miyagawa, Taiki and Ebihara, Akinori F},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7792--7804},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/miyagawa21a/miyagawa21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/miyagawa21a.html},\n abstract = \t {We propose a model for multiclass classification of time series to make a prediction as early and as accurate as possible. The matrix sequential probability ratio test (MSPRT) is known to be asymptotically optimal for this setting, but contains a critical assumption that hinders broad real-world applications; the MSPRT requires the underlying probability density. To address this problem, we propose to solve density ratio matrix estimation (DRME), a novel type of density ratio estimation that consists of estimating matrices of multiple density ratios with constraints and thus is more challenging than the conventional density ratio estimation. We propose a log-sum-exp-type loss function (LSEL) for solving DRME and prove the following: (i) the LSEL provides the true density ratio matrix as the sample size of the training set increases (consistency); (ii) it assigns larger gradients to harder classes (hard class weighting effect); and (iii) it provides discriminative scores even on class-imbalanced datasets (guess-aversion). Our overall architecture for early classification, MSPRT-TANDEM, statistically significantly outperforms baseline models on four datasets including action recognition, especially in the early stage of sequential observations. Our code and datasets are publicly available.}\n}", "pdf": "http://proceedings.mlr.press/v139/miyagawa21a/miyagawa21a.pdf", "supp": "", "pdf_size": 1796593, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8968954885886250341&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "NEC Corporation, Kanagawa, Japan; NEC Corporation, Kanagawa, Japan", "aff_domain": "nec.com; ", "email": "nec.com; ", "github": "https://github.com/TaikiMiyagawa/MSPRT-TANDEM", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/miyagawa21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "NEC Corporation", "aff_unique_dep": "", "aff_unique_url": "https://www.nec.com", "aff_unique_abbr": "NEC", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Japan" }, { "title": "The Symmetry between Arms and Knapsacks: A Primal-Dual Approach for Bandits with Knapsacks", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9151", "id": "9151", "proceeding": "http://proceedings.mlr.press/v139/li21s.html", "slides": "", "author_site": "Xiaocheng Li, Chunlin Sun, Yinyu Ye", "author": "Xiaocheng Li; Chunlin Sun; Yinyu Ye", "abstract": "In this paper, we study the bandits with knapsacks (BwK) problem and develop a primal-dual based algorithm that achieves a problem-dependent logarithmic regret bound. 
The BwK problem extends the multi-arm bandit (MAB) problem to model the resource consumption, and the existing BwK literature has been mainly focused on deriving asymptotically optimal distribution-free regret bounds. We first study the primal and dual linear programs underlying the BwK problem. From this primal-dual perspective, we discover symmetry between arms and knapsacks, and then propose a new notion of suboptimality measure for the BwK problem. The suboptimality measure highlights the important role of knapsacks in determining algorithm regret and inspires the design of our two-phase algorithm. In the first phase, the algorithm identifies the optimal arms and the binding knapsacks, and in the second phase, it exhausts the binding knapsacks via playing the optimal arms through an adaptive procedure. Our regret upper bound involves the proposed suboptimality measure and it has a logarithmic dependence on length of horizon $T$ and a polynomial dependence on $m$ (the numbers of arms) and $d$ (the number of knapsacks). To the best of our knowledge, this is the first problem-dependent logarithmic regret bound for solving the general BwK problem.", "bibtex": "@InProceedings{pmlr-v139-li21s,\n title = \t {The Symmetry between Arms and Knapsacks: A Primal-Dual Approach for Bandits with Knapsacks},\n author = {Li, Xiaocheng and Sun, Chunlin and Ye, Yinyu},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6483--6492},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21s/li21s.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21s.html},\n abstract = \t {In this paper, we study the bandits with knapsacks (BwK) problem and develop a primal-dual based algorithm that achieves a problem-dependent logarithmic regret bound. The BwK problem extends the multi-arm bandit (MAB) problem to model the resource consumption, and the existing BwK literature has been mainly focused on deriving asymptotically optimal distribution-free regret bounds. We first study the primal and dual linear programs underlying the BwK problem. From this primal-dual perspective, we discover symmetry between arms and knapsacks, and then propose a new notion of suboptimality measure for the BwK problem. The suboptimality measure highlights the important role of knapsacks in determining algorithm regret and inspires the design of our two-phase algorithm. In the first phase, the algorithm identifies the optimal arms and the binding knapsacks, and in the second phase, it exhausts the binding knapsacks via playing the optimal arms through an adaptive procedure. Our regret upper bound involves the proposed suboptimality measure and it has a logarithmic dependence on length of horizon $T$ and a polynomial dependence on $m$ (the numbers of arms) and $d$ (the number of knapsacks). 
To the best of our knowledge, this is the first problem-dependent logarithmic regret bound for solving the general BwK problem.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21s/li21s.pdf", "supp": "", "pdf_size": 380812, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8988407947796131391&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Imperial College London, London, United Kingdom; Stanford University, California, USA; Stanford University, California, USA", "aff_domain": "stanford.edu;stanford.edu; ", "email": "stanford.edu;stanford.edu; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/li21s.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "Imperial College London;Stanford University", "aff_unique_dep": ";", "aff_unique_url": "https://www.imperial.ac.uk;https://www.stanford.edu", "aff_unique_abbr": "ICL;Stanford", "aff_campus_unique_index": "0;1;1", "aff_campus_unique": "London;California", "aff_country_unique_index": "0;1;1", "aff_country_unique": "United Kingdom;United States" }, { "title": "Theory of Spectral Method for Union of Subspaces-Based Random Geometry Graph", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8625", "id": "8625", "proceeding": "http://proceedings.mlr.press/v139/li21f.html", "slides": "", "author_site": "Gen Li, Yuantao Gu", "author": "Gen Li; Yuantao Gu", "abstract": "Spectral method is a commonly used scheme to cluster data points lying close to Union of Subspaces, a task known as Subspace Clustering. The typical usage is to construct a Random Geometry Graph first and then apply spectral method to the graph to obtain clustering result. The latter step has been coined the name Spectral Clustering. As far as we know, in spite of the significance of both steps in spectral-method-based Subspace Clustering, all existing theoretical results focus on the first step of constructing the graph, but ignore the final step to correct false connections through spectral clustering. This paper establishes a theory to show the power of this method for the first time, in which we demonstrate the mechanism of spectral clustering by analyzing a simplified algorithm under the widely used semi-random model. Based on this theory, we prove the efficiency of Subspace Clustering in fairly broad conditions. The insights and analysis techniques developed in this paper might also have implications for other random graph problems.", "bibtex": "@InProceedings{pmlr-v139-li21f,\n title = \t {Theory of Spectral Method for Union of Subspaces-Based Random Geometry Graph},\n author = {Li, Gen and Gu, Yuantao},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6337--6345},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21f/li21f.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21f.html},\n abstract = \t {Spectral method is a commonly used scheme to cluster data points lying close to Union of Subspaces, a task known as Subspace Clustering. The typical usage is to construct a Random Geometry Graph first and then apply spectral method to the graph to obtain clustering result. The latter step has been coined the name Spectral Clustering. 
As far as we know, in spite of the significance of both steps in spectral-method-based Subspace Clustering, all existing theoretical results focus on the first step of constructing the graph, but ignore the final step to correct false connections through spectral clustering. This paper establishes a theory to show the power of this method for the first time, in which we demonstrate the mechanism of spectral clustering by analyzing a simplified algorithm under the widely used semi-random model. Based on this theory, we prove the efficiency of Subspace Clustering in fairly broad conditions. The insights and analysis techniques developed in this paper might also have implications for other random graph problems.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21f/li21f.pdf", "supp": "", "pdf_size": 333532, "gs_citation": 2, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2252997776220255389&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Beijing National Research Center for Information Science and Technology (BNRist) + Department of Electronic Engineering, Tsinghua University; Beijing National Research Center for Information Science and Technology (BNRist) + Department of Electronic Engineering, Tsinghua University", "aff_domain": "tsinghua.edu.cn;tsinghua.edu.cn", "email": "tsinghua.edu.cn;tsinghua.edu.cn", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/li21f.html", "aff_unique_index": "0+1;0+1", "aff_unique_norm": "Beijing National Research Center for Information Science and Technology;Tsinghua University", "aff_unique_dep": ";Department of Electronic Engineering", "aff_unique_url": ";https://www.tsinghua.edu.cn", "aff_unique_abbr": "BNRist;THU", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "China" }, { "title": "Think Global and Act Local: Bayesian Optimisation over High-Dimensional Categorical and Mixed Search Spaces", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8551", "id": "8551", "proceeding": "http://proceedings.mlr.press/v139/wan21b.html", "slides": "/media/icml-2021/Slides/8551.pdf", "author_site": "Xingchen Wan, Vu Nguyen, Huong Ha, Binxin Ru, Cong Lu, Michael A Osborne", "author": "Xingchen Wan; Vu Nguyen; Huong Ha; Binxin Ru; Cong Lu; Michael A. Osborne", "abstract": "High-dimensional black-box optimisation remains an important yet notoriously challenging problem. Despite the success of Bayesian optimisation methods on continuous domains, domains that are categorical, or that mix continuous and categorical variables, remain challenging. We propose a novel solution\u2014we combine local optimisation with a tailored kernel design, effectively handling high-dimensional categorical and mixed search spaces, whilst retaining sample efficiency. We further derive convergence guarantee for the proposed approach. 
Finally, we demonstrate empirically that our method outperforms the current baselines on a variety of synthetic and real-world tasks in terms of performance, computational costs, or both.", "bibtex": "@InProceedings{pmlr-v139-wan21b,\n title = \t {Think Global and Act Local: Bayesian Optimisation over High-Dimensional Categorical and Mixed Search Spaces},\n author = {Wan, Xingchen and Nguyen, Vu and Ha, Huong and Ru, Binxin and Lu, Cong and Osborne, Michael A.},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10663--10674},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wan21b/wan21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/wan21b.html},\n abstract = \t {High-dimensional black-box optimisation remains an important yet notoriously challenging problem. Despite the success of Bayesian optimisation methods on continuous domains, domains that are categorical, or that mix continuous and categorical variables, remain challenging. We propose a novel solution\u2014we combine local optimisation with a tailored kernel design, effectively handling high-dimensional categorical and mixed search spaces, whilst retaining sample efficiency. We further derive convergence guarantee for the proposed approach. Finally, we demonstrate empirically that our method outperforms the current baselines on a variety of synthetic and real-world tasks in terms of performance, computational costs, or both.}\n}", "pdf": "http://proceedings.mlr.press/v139/wan21b/wan21b.pdf", "supp": "", "pdf_size": 9364583, "gs_citation": 78, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6765216544866118683&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Machine Learning Research Group, University of Oxford, Oxford, UK; Amazon, Adelaide, Australia; RMIT University, Melbourne, Australia; Machine Learning Research Group, University of Oxford, Oxford, UK; Machine Learning Research Group, University of Oxford, Oxford, UK; Machine Learning Research Group, University of Oxford, Oxford, UK", "aff_domain": "robots.ox.ac.uk; ; ; ; ; ", "email": "robots.ox.ac.uk; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/wan21b.html", "aff_unique_index": "0;1;2;0;0;0", "aff_unique_norm": "University of Oxford;Amazon;RMIT University", "aff_unique_dep": "Machine Learning Research Group;Amazon;", "aff_unique_url": "https://www.ox.ac.uk;https://www.amazon.com;https://www.rmit.edu.au", "aff_unique_abbr": "Oxford;Amazon;RMIT", "aff_campus_unique_index": "0;1;2;0;0;0", "aff_campus_unique": "Oxford;Adelaide;Melbourne", "aff_country_unique_index": "0;1;1;0;0;0", "aff_country_unique": "United Kingdom;Australia" }, { "title": "Thinking Like Transformers", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9995", "id": "9995", "proceeding": "http://proceedings.mlr.press/v139/weiss21a.html", "slides": "/media/icml-2021/Slides/9995.pdf", "author_site": "Gail Weiss, Yoav Goldberg, Eran Yahav", "author": "Gail Weiss; Yoav Goldberg; Eran Yahav", "abstract": "What is the computational model behind a Transformer? 
Where recurrent neural networks have direct parallels in finite state machines, allowing clear discussion and thought around architecture variants or trained models, Transformers have no such familiar parallel. In this paper we aim to change that, proposing a computational model for the transformer-encoder in the form of a programming language. We map the basic components of a transformer-encoder\u2014attention and feed-forward computation\u2014into simple primitives, around which we form a programming language: the Restricted Access Sequence Processing Language (RASP). We show how RASP can be used to program solutions to tasks that could conceivably be learned by a Transformer, and how a Transformer can be trained to mimic a RASP solution. In particular, we provide RASP programs for histograms, sorting, and Dyck-languages. We further use our model to relate their difficulty in terms of the number of required layers and attention heads: analyzing a RASP program implies a maximum number of heads and layers necessary to encode a task in a transformer. Finally, we see how insights gained from our abstraction might be used to explain phenomena seen in recent works.", "bibtex": "@InProceedings{pmlr-v139-weiss21a,\n title = \t {Thinking Like Transformers},\n author = {Weiss, Gail and Goldberg, Yoav and Yahav, Eran},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11080--11090},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/weiss21a/weiss21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/weiss21a.html},\n abstract = \t {What is the computational model behind a Transformer? Where recurrent neural networks have direct parallels in finite state machines, allowing clear discussion and thought around architecture variants or trained models, Transformers have no such familiar parallel. In this paper we aim to change that, proposing a computational model for the transformer-encoder in the form of a programming language. We map the basic components of a transformer-encoder\u2014attention and feed-forward computation\u2014into simple primitives, around which we form a programming language: the Restricted Access Sequence Processing Language (RASP). We show how RASP can be used to program solutions to tasks that could conceivably be learned by a Transformer, and how a Transformer can be trained to mimic a RASP solution. In particular, we provide RASP programs for histograms, sorting, and Dyck-languages. We further use our model to relate their difficulty in terms of the number of required layers and attention heads: analyzing a RASP program implies a maximum number of heads and layers necessary to encode a task in a transformer. 
Finally, we see how insights gained from our abstraction might be used to explain phenomena seen in recent works.}\n}", "pdf": "http://proceedings.mlr.press/v139/weiss21a/weiss21a.pdf", "supp": "", "pdf_size": 1183639, "gs_citation": 160, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18191652199606300845&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Technion, Haifa, Israel; Bar Ilan University, Ramat Gan, Israel + Allen Institute for AI; Technion, Haifa, Israel", "aff_domain": "cs.technion.ac.il; ; ", "email": "cs.technion.ac.il; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/weiss21a.html", "aff_unique_index": "0;1+2;0", "aff_unique_norm": "Technion - Israel Institute of Technology;Bar-Ilan University;Allen Institute for AI", "aff_unique_dep": ";;", "aff_unique_url": "https://www.technion.ac.il/en/;https://www.biu.ac.il;https://allenai.org", "aff_unique_abbr": "Technion;BIU;AI2", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Haifa;Ramat Gan;", "aff_country_unique_index": "0;0+1;0", "aff_country_unique": "Israel;United States" }, { "title": "Three Operator Splitting with a Nonconvex Loss Function", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9695", "id": "9695", "proceeding": "http://proceedings.mlr.press/v139/yurtsever21a.html", "slides": "", "author_site": "Alp Yurtsever, Varun Mangalick, Suvrit Sra", "author": "Alp Yurtsever; Varun Mangalick; Suvrit Sra", "abstract": "We consider the problem of minimizing the sum of three functions, one of which is nonconvex but differentiable, and the other two are convex but possibly nondifferentiable. We investigate the Three Operator Splitting method (TOS) of Davis & Yin (2017) with an aim to extend its theoretical guarantees for this nonconvex problem template. In particular, we prove convergence of TOS with nonasymptotic bounds on its nonstationarity and infeasibility errors. In contrast with the existing work on nonconvex TOS, our guarantees do not require additional smoothness assumptions on the terms comprising the objective; hence they cover instances of particular interest where the nondifferentiable terms are indicator functions. We also extend our results to a stochastic setting where we have access only to an unbiased estimator of the gradient. Finally, we illustrate the effectiveness of the proposed method through numerical experiments on quadratic assignment problems.", "bibtex": "@InProceedings{pmlr-v139-yurtsever21a,\n title = \t {Three Operator Splitting with a Nonconvex Loss Function},\n author = {Yurtsever, Alp and Mangalick, Varun and Sra, Suvrit},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12267--12277},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yurtsever21a/yurtsever21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/yurtsever21a.html},\n abstract = \t {We consider the problem of minimizing the sum of three functions, one of which is nonconvex but differentiable, and the other two are convex but possibly nondifferentiable. We investigate the Three Operator Splitting method (TOS) of Davis & Yin (2017) with an aim to extend its theoretical guarantees for this nonconvex problem template. 
In particular, we prove convergence of TOS with nonasymptotic bounds on its nonstationarity and infeasibility errors. In contrast with the existing work on nonconvex TOS, our guarantees do not require additional smoothness assumptions on the terms comprising the objective; hence they cover instances of particular interest where the nondifferentiable terms are indicator functions. We also extend our results to a stochastic setting where we have access only to an unbiased estimator of the gradient. Finally, we illustrate the effectiveness of the proposed method through numerical experiments on quadratic assignment problems.}\n}", "pdf": "http://proceedings.mlr.press/v139/yurtsever21a/yurtsever21a.pdf", "supp": "", "pdf_size": 996554, "gs_citation": 13, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14275996016492090770&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Massachusetts Institute of Technology; Massachusetts Institute of Technology; Massachusetts Institute of Technology", "aff_domain": "mit.edu; ; ", "email": "mit.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/yurtsever21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "", "aff_unique_url": "https://web.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Tight Bounds on the Smallest Eigenvalue of the Neural Tangent Kernel for Deep ReLU Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8857", "id": "8857", "proceeding": "http://proceedings.mlr.press/v139/nguyen21g.html", "slides": "", "author_site": "Quynh Nguyen, Marco Mondelli, Guido Montufar", "author": "Quynh Nguyen; Marco Mondelli; Guido F Montufar", "abstract": "A recent line of work has analyzed the theoretical properties of deep neural networks via the Neural Tangent Kernel (NTK). In particular, the smallest eigenvalue of the NTK has been related to the memorization capacity, the global convergence of gradient descent algorithms and the generalization of deep nets. However, existing results either provide bounds in the two-layer setting or assume that the spectrum of the NTK matrices is bounded away from 0 for multi-layer networks. In this paper, we provide tight bounds on the smallest eigenvalue of NTK matrices for deep ReLU nets, both in the limiting case of infinite widths and for finite widths. In the finite-width setting, the network architectures we consider are fairly general: we require the existence of a wide layer with roughly order of $N$ neurons, $N$ being the number of data samples; and the scaling of the remaining layer widths is arbitrary (up to logarithmic factors). 
To obtain our results, we analyze various quantities of independent interest: we give lower bounds on the smallest singular value of hidden feature matrices, and upper bounds on the Lipschitz constant of input-output feature maps.", "bibtex": "@InProceedings{pmlr-v139-nguyen21g,\n title = \t {Tight Bounds on the Smallest Eigenvalue of the Neural Tangent Kernel for Deep ReLU Networks},\n author = {Nguyen, Quynh and Mondelli, Marco and Montufar, Guido F},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8119--8129},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nguyen21g/nguyen21g.pdf},\n url = \t {https://proceedings.mlr.press/v139/nguyen21g.html},\n abstract = \t {A recent line of work has analyzed the theoretical properties of deep neural networks via the Neural Tangent Kernel (NTK). In particular, the smallest eigenvalue of the NTK has been related to the memorization capacity, the global convergence of gradient descent algorithms and the generalization of deep nets. However, existing results either provide bounds in the two-layer setting or assume that the spectrum of the NTK matrices is bounded away from 0 for multi-layer networks. In this paper, we provide tight bounds on the smallest eigenvalue of NTK matrices for deep ReLU nets, both in the limiting case of infinite widths and for finite widths. In the finite-width setting, the network architectures we consider are fairly general: we require the existence of a wide layer with roughly order of $N$ neurons, $N$ being the number of data samples; and the scaling of the remaining layer widths is arbitrary (up to logarithmic factors). 
To obtain our results, we analyze various quantities of independent interest: we give lower bounds on the smallest singular value of hidden feature matrices, and upper bounds on the Lipschitz constant of input-output feature maps.}\n}", "pdf": "http://proceedings.mlr.press/v139/nguyen21g/nguyen21g.pdf", "supp": "", "pdf_size": 591332, "gs_citation": 96, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2327428671252458232&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "MPI-MIS, Germany; IST, Austria; UCLA", "aff_domain": "gmail.com; ; ", "email": "gmail.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/nguyen21g.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Max Planck Institute for Mathematics in the Sciences;Institute of Science and Technology Austria;University of California, Los Angeles", "aff_unique_dep": ";;", "aff_unique_url": "https://www.mis.mpg.de;https://www.ist.ac.at;https://www.ucla.edu", "aff_unique_abbr": "MPI-MIS;IST Austria;UCLA", "aff_campus_unique_index": "1", "aff_campus_unique": ";Los Angeles", "aff_country_unique_index": "0;1;2", "aff_country_unique": "Germany;Austria;United States" }, { "title": "Tightening the Dependence on Horizon in the Sample Complexity of Q-Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8929", "id": "8929", "proceeding": "http://proceedings.mlr.press/v139/li21b.html", "slides": "/media/icml-2021/Slides/8929.pdf", "author_site": "Gen Li, Changxiao Cai, Yuxin Chen, Yuantao Gu, Yuting Wei, Yuejie Chi", "author": "Gen Li; Changxiao Cai; Yuxin Chen; Yuantao Gu; Yuting Wei; Yuejie Chi", "abstract": "Q-learning, which seeks to learn the optimal Q-function of a Markov decision process (MDP) in a model-free fashion, lies at the heart of reinforcement learning. Focusing on the synchronous setting (such that independent samples for all state-action pairs are queried via a generative model in each iteration), substantial progress has been made recently towards understanding the sample efficiency of Q-learning. To yield an entrywise $\\varepsilon$-accurate estimate of the optimal Q-function, state-of-the-art theory requires at least an order of $\\frac{|S||A|}{(1-\\gamma)^5\\varepsilon^{2}}$ samples in the infinite-horizon $\\gamma$-discounted setting. In this work, we sharpen the sample complexity of synchronous Q-learning to the order of $\\frac{|S||A|}{(1-\\gamma)^4\\varepsilon^2}$ (up to some logarithmic factor) for any $0<\\varepsilon <1$, leading to an order-wise improvement in $\\frac{1}{1-\\gamma}$. Analogous results are derived for finite-horizon MDPs as well. Notably, our sample complexity analysis unveils the effectiveness of vanilla Q-learning, which matches that of speedy Q-learning without requiring extra computation and storage. 
Our result is obtained by identifying novel error decompositions and recursion relations, which might shed light on how to study other variants of Q-learning.", "bibtex": "@InProceedings{pmlr-v139-li21b,\n title = \t {Tightening the Dependence on Horizon in the Sample Complexity of Q-Learning},\n author = {Li, Gen and Cai, Changxiao and Chen, Yuxin and Gu, Yuantao and Wei, Yuting and Chi, Yuejie},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6296--6306},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21b/li21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21b.html},\n abstract = \t {Q-learning, which seeks to learn the optimal Q-function of a Markov decision process (MDP) in a model-free fashion, lies at the heart of reinforcement learning. Focusing on the synchronous setting (such that independent samples for all state-action pairs are queried via a generative model in each iteration), substantial progress has been made recently towards understanding the sample efficiency of Q-learning. To yield an entrywise $\\varepsilon$-accurate estimate of the optimal Q-function, state-of-the-art theory requires at least an order of $\\frac{|S||A|}{(1-\\gamma)^5\\varepsilon^{2}}$ samples in the infinite-horizon $\\gamma$-discounted setting. In this work, we sharpen the sample complexity of synchronous Q-learning to the order of $\\frac{|S||A|}{(1-\\gamma)^4\\varepsilon^2}$ (up to some logarithmic factor) for any $0<\\varepsilon <1$, leading to an order-wise improvement in $\\frac{1}{1-\\gamma}$. Analogous results are derived for finite-horizon MDPs as well. Notably, our sample complexity analysis unveils the effectiveness of vanilla Q-learning, which matches that of speedy Q-learning without requiring extra computation and storage. 
Our result is obtained by identifying novel error decompositions and recursion relations, which might shed light on how to study other variants of Q-learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21b/li21b.pdf", "supp": "", "pdf_size": 399526, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8214697467178508755&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Electronic Engineering, Tsinghua University; Department of Electrical and Computer Engineering, Princeton University; Department of Electrical and Computer Engineering, Princeton University; Department of Electronic Engineering, Tsinghua University; Department of Statistics and Data Science, Carnegie Mellon University; Department of Electrical and Computer Engineering, Carnegie Mellon University", "aff_domain": "princeton.edu; ; ; ; ; ", "email": "princeton.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/li21b.html", "aff_unique_index": "0;1;1;0;2;2", "aff_unique_norm": "Tsinghua University;Princeton University;Carnegie Mellon University", "aff_unique_dep": "Department of Electronic Engineering;Department of Electrical and Computer Engineering;Department of Statistics and Data Science", "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.princeton.edu;https://www.cmu.edu", "aff_unique_abbr": "THU;Princeton;CMU", "aff_campus_unique_index": "1", "aff_campus_unique": ";Pittsburgh", "aff_country_unique_index": "0;1;1;0;1;1", "aff_country_unique": "China;United States" }, { "title": "Tighter Bounds on the Log Marginal Likelihood of Gaussian Process Regression Using Conjugate Gradients", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10067", "id": "10067", "proceeding": "http://proceedings.mlr.press/v139/artemev21a.html", "slides": "/media/icml-2021/Slides/10067.pdf", "author_site": "Artem Artemev, David Burt, Mark van der Wilk", "author": "Artem Artemev; David R. Burt; Mark van der Wilk", "abstract": "We propose a lower bound on the log marginal likelihood of Gaussian process regression models that can be computed without matrix factorisation of the full kernel matrix. We show that approximate maximum likelihood learning of model parameters by maximising our lower bound retains many benefits of the sparse variational approach while reducing the bias introduced into hyperparameter learning. The basis of our bound is a more careful analysis of the log-determinant term appearing in the log marginal likelihood, as well as using the method of conjugate gradients to derive tight lower bounds on the term involving a quadratic form. Our approach is a step forward in unifying methods relying on lower bound maximisation (e.g. variational methods) and iterative approaches based on conjugate gradients for training Gaussian processes. In experiments, we show improved predictive performance with our model for a comparable amount of training time compared to other conjugate gradient based approaches.", "bibtex": "@InProceedings{pmlr-v139-artemev21a,\n title = \t {Tighter Bounds on the Log Marginal Likelihood of Gaussian Process Regression Using Conjugate Gradients},\n author = {Artemev, Artem and Burt, David R. 
and van der Wilk, Mark},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {362--372},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/artemev21a/artemev21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/artemev21a.html},\n abstract = \t {We propose a lower bound on the log marginal likelihood of Gaussian process regression models that can be computed without matrix factorisation of the full kernel matrix. We show that approximate maximum likelihood learning of model parameters by maximising our lower bound retains many benefits of the sparse variational approach while reducing the bias introduced into hyperparameter learning. The basis of our bound is a more careful analysis of the log-determinant term appearing in the log marginal likelihood, as well as using the method of conjugate gradients to derive tight lower bounds on the term involving a quadratic form. Our approach is a step forward in unifying methods relying on lower bound maximisation (e.g. variational methods) and iterative approaches based on conjugate gradients for training Gaussian processes. In experiments, we show improved predictive performance with our model for a comparable amount of training time compared to other conjugate gradient based approaches.}\n}", "pdf": "http://proceedings.mlr.press/v139/artemev21a/artemev21a.pdf", "supp": "", "pdf_size": 768661, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11018684751248796266&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of Computing, Imperial College London, London, UK + Secondmind, Cambridge, UK; Department of Engineering, University of Cambridge, Cambridge, UK; Department of Computing, Imperial College London, London, UK", "aff_domain": "imperial.ac.uk;cam.ac.uk; ", "email": "imperial.ac.uk;cam.ac.uk; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/artemev21a.html", "aff_unique_index": "0+1;2;0", "aff_unique_norm": "Imperial College London;Secondmind;University of Cambridge", "aff_unique_dep": "Department of Computing;;Department of Engineering", "aff_unique_url": "https://www.imperial.ac.uk;;https://www.cam.ac.uk", "aff_unique_abbr": "Imperial;;Cambridge", "aff_campus_unique_index": "0+1;1;0", "aff_campus_unique": "London;Cambridge", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Tilting the playing field: Dynamical loss functions for machine learning", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9505", "id": "9505", "proceeding": "http://proceedings.mlr.press/v139/ruiz-garcia21a.html", "slides": "/media/icml-2021/Slides/9505.pdf", "author_site": "Miguel Ruiz Garcia, Ge Zhang, Samuel Schoenholz, Andrea Liu", "author": "Miguel Ruiz-Garcia; Ge Zhang; Samuel S Schoenholz; Andrea J. Liu", "abstract": "We show that learning can be improved by using loss functions that evolve cyclically during training to emphasize one class at a time. In underparameterized networks, such dynamical loss functions can lead to successful training for networks that fail to find deep minima of the standard cross-entropy loss. In overparameterized networks, dynamical loss functions can lead to better generalization. 
Improvement arises from the interplay of the changing loss landscape with the dynamics of the system as it evolves to minimize the loss. In particular, as the loss function oscillates, instabilities develop in the form of bifurcation cascades, which we study using the Hessian and Neural Tangent Kernel. Valleys in the landscape widen and deepen, and then narrow and rise as the loss landscape changes during a cycle. As the landscape narrows, the learning rate becomes too large and the network becomes unstable and bounces around the valley. This process ultimately pushes the system into deeper and wider regions of the loss landscape and is characterized by decreasing eigenvalues of the Hessian. This results in better regularized models with improved generalization performance.", "bibtex": "@InProceedings{pmlr-v139-ruiz-garcia21a,\n title = \t {Tilting the playing field: Dynamical loss functions for machine learning},\n author = {Ruiz-Garcia, Miguel and Zhang, Ge and Schoenholz, Samuel S and Liu, Andrea J.},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9157--9167},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ruiz-garcia21a/ruiz-garcia21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ruiz-garcia21a.html},\n abstract = \t {We show that learning can be improved by using loss functions that evolve cyclically during training to emphasize one class at a time. In underparameterized networks, such dynamical loss functions can lead to successful training for networks that fail to find deep minima of the standard cross-entropy loss. In overparameterized networks, dynamical loss functions can lead to better generalization. Improvement arises from the interplay of the changing loss landscape with the dynamics of the system as it evolves to minimize the loss. In particular, as the loss function oscillates, instabilities develop in the form of bifurcation cascades, which we study using the Hessian and Neural Tangent Kernel. Valleys in the landscape widen and deepen, and then narrow and rise as the loss landscape changes during a cycle. As the landscape narrows, the learning rate becomes too large and the network becomes unstable and bounces around the valley. This process ultimately pushes the system into deeper and wider regions of the loss landscape and is characterized by decreasing eigenvalues of the Hessian. 
This results in better regularized models with improved generalization performance.}\n}", "pdf": "http://proceedings.mlr.press/v139/ruiz-garcia21a/ruiz-garcia21a.pdf", "supp": "", "pdf_size": 1288025, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1722474778051641263&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Physics and Astronomy, University of Pennsylvania, Philadelphia, PA, USA+Department of Applied Mathematics, ETSII, Universidad Polit\u00e9cnica de Madrid, Madrid, Spain; Department of Physics and Astronomy, University of Pennsylvania, Philadelphia, PA, USA; Google Research: Brain Team; Department of Physics and Astronomy, University of Pennsylvania, Philadelphia, PA, USA", "aff_domain": "uc3m.es; ; ; ", "email": "uc3m.es; ; ; ", "github": "https://github.com/miguel-rg/dynamical-loss-functions", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/ruiz-garcia21a.html", "aff_unique_index": "0+1;0;2;0", "aff_unique_norm": "University of Pennsylvania;Universidad Polit\u00e9cnica de Madrid;Google", "aff_unique_dep": "Department of Physics and Astronomy;Department of Applied Mathematics;Google Research: Brain Team", "aff_unique_url": "https://www.upenn.edu;https://www.upm.es;https://research.google", "aff_unique_abbr": "UPenn;UPM;Google", "aff_campus_unique_index": "0+1;0;2;0", "aff_campus_unique": "Philadelphia;Madrid;Mountain View", "aff_country_unique_index": "0+1;0;0;0", "aff_country_unique": "United States;Spain" }, { "title": "To be Robust or to be Fair: Towards Fairness in Adversarial Training", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9585", "id": "9585", "proceeding": "http://proceedings.mlr.press/v139/xu21b.html", "slides": "/media/icml-2021/Slides/9585.pdf", "author_site": "Han Xu, Xiaorui Liu, Yaxin Li, Anil Jain, Jiliang Tang", "author": "Han Xu; Xiaorui Liu; Yaxin Li; Anil Jain; Jiliang Tang", "abstract": "Adversarial training algorithms have been proved to be reliable to improve machine learning models\u2019 robustness against adversarial examples. However, we find that adversarial training algorithms tend to introduce severe disparity of accuracy and robustness between different groups of data. For instance, PGD adversarially trained ResNet18 model on CIFAR-10 has 93% clean accuracy and 67% PGD l_infty-8 adversarial accuracy on the class \u201dautomobile\u201d but only 65% and 17% on class \u201dcat\u201d. This phenomenon happens in balanced datasets and does not exist in naturally trained models when only using clean samples. In this work, we empirically and theoretically show that this phenomenon can generally happen under adversarial training algorithms which minimize DNN models\u2019 robust errors.
Motivated by these findings, we propose a Fair-Robust-Learning (FRL) framework to mitigate this unfairness problem when doing adversarial defenses and experimental results validate the effectiveness of FRL.", "bibtex": "@InProceedings{pmlr-v139-xu21b,\n title = \t {To be Robust or to be Fair: Towards Fairness in Adversarial Training},\n author = {Xu, Han and Liu, Xiaorui and Li, Yaxin and Jain, Anil and Tang, Jiliang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11492--11501},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/xu21b/xu21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/xu21b.html},\n abstract = \t {Adversarial training algorithms have been proved to be reliable to improve machine learning models\u2019 robustness against adversarial examples. However, we find that adversarial training algorithms tend to introduce severe disparity of accuracy and robustness between different groups of data. For instance, PGD adversarially trained ResNet18 model on CIFAR-10 has 93% clean accuracy and 67% PGD l_infty-8 adversarial accuracy on the class \u201dautomobile\u201d but only 65% and 17% on class \u201dcat\u201d. This phenomenon happens in balanced datasets and does not exist in naturally trained models when only using clean samples. In this work, we empirically and theoretically show that this phenomenon can generally happen under adversarial training algorithms which minimize DNN models\u2019 robust errors. Motivated by these findings, we propose a Fair-Robust-Learning (FRL) framework to mitigate this unfairness problem when doing adversarial defenses and experimental results validate the effectiveness of FRL.}\n}", "pdf": "http://proceedings.mlr.press/v139/xu21b/xu21b.pdf", "supp": "", "pdf_size": 672884, "gs_citation": 225, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10942841886932715395&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science and Engineering, Michigan State University; Department of Computer Science and Engineering, Michigan State University; Department of Computer Science and Engineering, Michigan State University; Department of Computer Science and Engineering, Michigan State University; Department of Computer Science and Engineering, Michigan State University", "aff_domain": "msu.edu;msu.edu; ; ; ", "email": "msu.edu;msu.edu; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/xu21b.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Michigan State University", "aff_unique_dep": "Department of Computer Science and Engineering", "aff_unique_url": "https://www.msu.edu", "aff_unique_abbr": "MSU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Top-k eXtreme Contextual Bandits with Arm Hierarchy", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10217", "id": "10217", "proceeding": "http://proceedings.mlr.press/v139/sen21a.html", "slides": "", "author_site": "Rajat Sen, Alexander Rakhlin, Lexing Ying, Rahul Kidambi, Dean Foster, Daniel Hill, Inderjit Dhillon", "author": "Rajat Sen; Alexander Rakhlin; Lexing Ying; Rahul Kidambi; Dean Foster; Daniel N Hill; Inderjit S. 
Dhillon", "abstract": "Motivated by modern applications, such as online advertisement and recommender systems, we study the top-$k$ extreme contextual bandits problem, where the total number of arms can be enormous, and the learner is allowed to select $k$ arms and observe all or some of the rewards for the chosen arms. We first propose an algorithm for the non-extreme realizable setting, utilizing the Inverse Gap Weighting strategy for selecting multiple arms. We show that our algorithm has a regret guarantee of $O(k\\sqrt{(A-k+1)T \\log (|F|T)})$, where $A$ is the total number of arms and $F$ is the class containing the regression function, while only requiring $\\tilde{O}(A)$ computation per time step. In the extreme setting, where the total number of arms can be in the millions, we propose a practically-motivated arm hierarchy model that induces a certain structure in mean rewards to ensure statistical and computational efficiency. The hierarchical structure allows for an exponential reduction in the number of relevant arms for each context, thus resulting in a regret guarantee of $O(k\\sqrt{(\\log A-k+1)T \\log (|F|T)})$. Finally, we implement our algorithm using a hierarchical linear function class and show superior performance with respect to well-known benchmarks on simulated bandit feedback experiments using extreme multi-label classification datasets. On a dataset with three million arms, our reduction scheme has an average inference time of only 7.9 milliseconds, which is a 100x improvement.", "bibtex": "@InProceedings{pmlr-v139-sen21a,\n title = \t {Top-k eXtreme Contextual Bandits with Arm Hierarchy},\n author = {Sen, Rajat and Rakhlin, Alexander and Ying, Lexing and Kidambi, Rahul and Foster, Dean and Hill, Daniel N and Dhillon, Inderjit S.},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9422--9433},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sen21a/sen21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/sen21a.html},\n abstract = \t {Motivated by modern applications, such as online advertisement and recommender systems, we study the top-$k$ extreme contextual bandits problem, where the total number of arms can be enormous, and the learner is allowed to select $k$ arms and observe all or some of the rewards for the chosen arms. We first propose an algorithm for the non-extreme realizable setting, utilizing the Inverse Gap Weighting strategy for selecting multiple arms. We show that our algorithm has a regret guarantee of $O(k\\sqrt{(A-k+1)T \\log (|F|T)})$, where $A$ is the total number of arms and $F$ is the class containing the regression function, while only requiring $\\tilde{O}(A)$ computation per time step. In the extreme setting, where the total number of arms can be in the millions, we propose a practically-motivated arm hierarchy model that induces a certain structure in mean rewards to ensure statistical and computational efficiency. The hierarchical structure allows for an exponential reduction in the number of relevant arms for each context, thus resulting in a regret guarantee of $O(k\\sqrt{(\\log A-k+1)T \\log (|F|T)})$. 
Finally, we implement our algorithm using a hierarchical linear function class and show superior performance with respect to well-known benchmarks on simulated bandit feedback experiments using extreme multi-label classification datasets. On a dataset with three million arms, our reduction scheme has an average inference time of only 7.9 milliseconds, which is a 100x improvement.}\n}", "pdf": "http://proceedings.mlr.press/v139/sen21a/sen21a.pdf", "supp": "", "pdf_size": 1229523, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18180552100019645288&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Google Research, Mountain View (work done while at Amazon); Massachusetts Institute of Technology, Boston; Stanford University, Palo Alto; Amazon; Amazon; Amazon; Department of Computer Science, University of Texas, Austin", "aff_domain": "utexas.edu; ; ; ; ; ; ", "email": "utexas.edu; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/sen21a.html", "aff_unique_index": "0;1;2;3;3;3;4", "aff_unique_norm": "Google;Massachusetts Institute of Technology;Stanford University;Amazon;University of Texas at Austin", "aff_unique_dep": "Google Research;;;Amazon.com, Inc.;Department of Computer Science", "aff_unique_url": "https://research.google;https://web.mit.edu;https://www.stanford.edu;https://www.amazon.com;https://www.utexas.edu", "aff_unique_abbr": "Google;MIT;Stanford;Amazon;UT Austin", "aff_campus_unique_index": "0;1;2;4", "aff_campus_unique": "Mountain View;Boston;Palo Alto;;Austin", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Toward Better Generalization Bounds with Locally Elastic Stability", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9565", "id": "9565", "proceeding": "http://proceedings.mlr.press/v139/deng21b.html", "slides": "", "author_site": "Zhun Deng, Hangfeng He, Weijie Su", "author": "Zhun Deng; Hangfeng He; Weijie Su", "abstract": "Algorithmic stability is a key characteristic to ensure the generalization ability of a learning algorithm. Among different notions of stability, \\emph{uniform stability} is arguably the most popular one, which yields exponential generalization bounds. However, uniform stability only considers the worst-case loss change (or so-called sensitivity) by removing a single data point, which is distribution-independent and therefore undesirable. There are many cases that the worst-case sensitivity of the loss is much larger than the average sensitivity taken over the single data point that is removed, especially in some advanced models such as random feature models or neural networks. Many previous works try to mitigate the distribution independent issue by proposing weaker notions of stability, however, they either only yield polynomial bounds or the bounds derived do not vanish as sample size goes to infinity. Given that, we propose \\emph{locally elastic stability} as a weaker and distribution-dependent stability notion, which still yields exponential generalization bounds. 
We further demonstrate that locally elastic stability implies tighter generalization bounds than those derived based on uniform stability in many situations by revisiting the examples of bounded support vector machines, regularized least square regressions, and stochastic gradient descent.", "bibtex": "@InProceedings{pmlr-v139-deng21b,\n title = \t {Toward Better Generalization Bounds with Locally Elastic Stability},\n author = {Deng, Zhun and He, Hangfeng and Su, Weijie},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2590--2600},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/deng21b/deng21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/deng21b.html},\n abstract = \t {Algorithmic stability is a key characteristic to ensure the generalization ability of a learning algorithm. Among different notions of stability, \\emph{uniform stability} is arguably the most popular one, which yields exponential generalization bounds. However, uniform stability only considers the worst-case loss change (or so-called sensitivity) by removing a single data point, which is distribution-independent and therefore undesirable. There are many cases that the worst-case sensitivity of the loss is much larger than the average sensitivity taken over the single data point that is removed, especially in some advanced models such as random feature models or neural networks. Many previous works try to mitigate the distribution independent issue by proposing weaker notions of stability, however, they either only yield polynomial bounds or the bounds derived do not vanish as sample size goes to infinity. Given that, we propose \\emph{locally elastic stability} as a weaker and distribution-dependent stability notion, which still yields exponential generalization bounds. 
We further demonstrate that locally elastic stability implies tighter generalization bounds than those derived based on uniform stability in many situations by revisiting the examples of bounded support vector machines, regularized least square regressions, and stochastic gradient descent.}\n}", "pdf": "http://proceedings.mlr.press/v139/deng21b/deng21b.pdf", "supp": "", "pdf_size": 602755, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13271207811226880319&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Harvard University; Department of Computer and Information Science, University of Pennsylvania; Wharton Statistics Department, University of Pennsylvania", "aff_domain": "g.harvard.edu; ; ", "email": "g.harvard.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/deng21b.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "Harvard University;University of Pennsylvania", "aff_unique_dep": ";Department of Computer and Information Science", "aff_unique_url": "https://www.harvard.edu;https://www.upenn.edu", "aff_unique_abbr": "Harvard;UPenn", "aff_campus_unique_index": "1", "aff_campus_unique": ";Philadelphia", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Toward Understanding the Feature Learning Process of Self-supervised Contrastive Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8579", "id": "8579", "proceeding": "http://proceedings.mlr.press/v139/wen21c.html", "slides": "", "author_site": "Zixin Wen, Yuanzhi Li", "author": "Zixin Wen; Yuanzhi Li", "abstract": "We formally study how contrastive learning learns the feature representations for neural networks by investigating its feature learning process. We consider the case where our data are comprised of two types of features: the sparse features which we want to learn from, and the dense features we want to get rid of. Theoretically, we prove that contrastive learning using ReLU networks provably learns the desired features if proper augmentations are adopted. We present an underlying principle called feature decoupling to explain the effects of augmentations, where we theoretically characterize how augmentations can reduce the correlations of dense features between positive samples while keeping the correlations of sparse features intact, thereby forcing the neural networks to learn from the self-supervision of sparse features. Empirically, we verified that the feature decoupling principle matches the underlying mechanism of contrastive learning in practice.", "bibtex": "@InProceedings{pmlr-v139-wen21c,\n title = \t {Toward Understanding the Feature Learning Process of Self-supervised Contrastive Learning},\n author = {Wen, Zixin and Li, Yuanzhi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11112--11122},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wen21c/wen21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/wen21c.html},\n abstract = \t {We formally study how contrastive learning learns the feature representations for neural networks by investigating its feature learning process. 
We consider the case where our data are comprised of two types of features: the sparse features which we want to learn from, and the dense features we want to get rid of. Theoretically, we prove that contrastive learning using ReLU networks provably learns the desired features if proper augmentations are adopted. We present an underlying principle called feature decoupling to explain the effects of augmentations, where we theoretically characterize how augmentations can reduce the correlations of dense features between positive samples while keeping the correlations of sparse features intact, thereby forcing the neural networks to learn from the self-supervision of sparse features. Empirically, we verified that the feature decoupling principle matches the underlying mechanism of contrastive learning in practice.}\n}", "pdf": "http://proceedings.mlr.press/v139/wen21c/wen21c.pdf", "supp": "", "pdf_size": 3020492, "gs_citation": 170, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6978876378654769580&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "University of International Business and Economics, Beijing; Carnegie Mellon University", "aff_domain": "andrew.cmu.edu;andrew.cmu.edu", "email": "andrew.cmu.edu;andrew.cmu.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/wen21c.html", "aff_unique_index": "0;1", "aff_unique_norm": "University of International Business and Economics;Carnegie Mellon University", "aff_unique_dep": ";", "aff_unique_url": "http://www.uibe.edu.cn;https://www.cmu.edu", "aff_unique_abbr": "UIBE;CMU", "aff_campus_unique_index": "0", "aff_campus_unique": "Beijing;", "aff_country_unique_index": "0;1", "aff_country_unique": "China;United States" }, { "title": "Towards Better Laplacian Representation in Reinforcement Learning with Generalized Graph Drawing", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8967", "id": "8967", "proceeding": "http://proceedings.mlr.press/v139/wang21ae.html", "slides": "", "author_site": "Kaixin Wang, Kuangqi Zhou, Qixin Zhang, Jie Shao, Bryan Hooi, Jiashi Feng", "author": "Kaixin Wang; Kuangqi Zhou; Qixin Zhang; Jie Shao; Bryan Hooi; Jiashi Feng", "abstract": "The Laplacian representation recently gains increasing attention for reinforcement learning as it provides succinct and informative representation for states, by taking the eigenvectors of the Laplacian matrix of the state-transition graph as state embeddings. Such representation captures the geometry of the underlying state space and is beneficial to RL tasks such as option discovery and reward shaping. To approximate the Laplacian representation in large (or even continuous) state spaces, recent works propose to minimize a spectral graph drawing objective, which however has infinitely many global minimizers other than the eigenvectors. As a result, their learned Laplacian representation may differ from the ground truth. To solve this problem, we reformulate the graph drawing objective into a generalized form and derive a new learning objective, which is proved to have eigenvectors as its unique global minimizer. It enables learning high-quality Laplacian representations that faithfully approximate the ground truth. We validate this via comprehensive experiments on a set of gridworld and continuous control environments. 
Moreover, we show that our learned Laplacian representations lead to more exploratory options and better reward shaping.", "bibtex": "@InProceedings{pmlr-v139-wang21ae,\n title = \t {Towards Better Laplacian Representation in Reinforcement Learning with Generalized Graph Drawing},\n author = {Wang, Kaixin and Zhou, Kuangqi and Zhang, Qixin and Shao, Jie and Hooi, Bryan and Feng, Jiashi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11003--11012},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21ae/wang21ae.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21ae.html},\n abstract = \t {The Laplacian representation recently gains increasing attention for reinforcement learning as it provides succinct and informative representation for states, by taking the eigenvectors of the Laplacian matrix of the state-transition graph as state embeddings. Such representation captures the geometry of the underlying state space and is beneficial to RL tasks such as option discovery and reward shaping. To approximate the Laplacian representation in large (or even continuous) state spaces, recent works propose to minimize a spectral graph drawing objective, which however has infinitely many global minimizers other than the eigenvectors. As a result, their learned Laplacian representation may differ from the ground truth. To solve this problem, we reformulate the graph drawing objective into a generalized form and derive a new learning objective, which is proved to have eigenvectors as its unique global minimizer. It enables learning high-quality Laplacian representations that faithfully approximate the ground truth. We validate this via comprehensive experiments on a set of gridworld and continuous control environments. 
Moreover, we show that our learned Laplacian representations lead to more exploratory options and better reward shaping.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21ae/wang21ae.pdf", "supp": "", "pdf_size": 2798875, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8098523240895445926&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "National University of Singapore; National University of Singapore; City University of Hong Kong; ByteDance AI lab; National University of Singapore; National University of Singapore", "aff_domain": "u.nus.edu;u.nus.edu; ; ; ; ", "email": "u.nus.edu;u.nus.edu; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/wang21ae.html", "aff_unique_index": "0;0;1;2;0;0", "aff_unique_norm": "National University of Singapore;City University of Hong Kong;ByteDance", "aff_unique_dep": ";;AI lab", "aff_unique_url": "https://www.nus.edu.sg;https://www.cityu.edu.hk;https://www.bytedance.com", "aff_unique_abbr": "NUS;CityU;ByteDance", "aff_campus_unique_index": "1", "aff_campus_unique": ";Hong Kong SAR", "aff_country_unique_index": "0;0;1;1;0;0", "aff_country_unique": "Singapore;China" }, { "title": "Towards Better Robust Generalization with Shift Consistency Regularization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10733", "id": "10733", "proceeding": "http://proceedings.mlr.press/v139/zhang21p.html", "slides": "/media/icml-2021/Slides/10733.pdf", "author_site": "Shufei Zhang, Zhuang Qian, Kaizhu Huang, Qiufeng Wang, Rui Zhang, Xinping Yi", "author": "Shufei Zhang; Zhuang Qian; Kaizhu Huang; Qiufeng Wang; Rui Zhang; Xinping Yi", "abstract": "While adversarial training becomes one of the most promising defending approaches against adversarial attacks for deep neural networks, the conventional wisdom through robust optimization may usually not guarantee good generalization for robustness. Concerning with robust generalization over unseen adversarial data, this paper investigates adversarial training from a novel perspective of shift consistency in latent space. We argue that the poor robust generalization of adversarial training is owing to the significantly dispersed latent representations generated by training and test adversarial data, as the adversarial perturbations push the latent features of natural examples in the same class towards diverse directions. This is underpinned by the theoretical analysis of the robust generalization gap, which is upper-bounded by the standard one over the natural data and a term of feature inconsistent shift caused by adversarial perturbation {\u2013} a measure of latent dispersion. Towards better robust generalization, we propose a new regularization method {\u2013} shift consistency regularization (SCR) {\u2013} to steer the same-class latent features of both natural and adversarial data into a common direction during adversarial training. 
The effectiveness of SCR in adversarial training is evaluated through extensive experiments over different datasets, such as CIFAR-10, CIFAR-100, and SVHN, against several competitive methods.", "bibtex": "@InProceedings{pmlr-v139-zhang21p,\n title = \t {Towards Better Robust Generalization with Shift Consistency Regularization},\n author = {Zhang, Shufei and Qian, Zhuang and Huang, Kaizhu and Wang, Qiufeng and Zhang, Rui and Yi, Xinping},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12524--12534},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21p/zhang21p.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21p.html},\n abstract = \t {While adversarial training becomes one of the most promising defending approaches against adversarial attacks for deep neural networks, the conventional wisdom through robust optimization may usually not guarantee good generalization for robustness. Concerning with robust generalization over unseen adversarial data, this paper investigates adversarial training from a novel perspective of shift consistency in latent space. We argue that the poor robust generalization of adversarial training is owing to the significantly dispersed latent representations generated by training and test adversarial data, as the adversarial perturbations push the latent features of natural examples in the same class towards diverse directions. This is underpinned by the theoretical analysis of the robust generalization gap, which is upper-bounded by the standard one over the natural data and a term of feature inconsistent shift caused by adversarial perturbation {\u2013} a measure of latent dispersion. Towards better robust generalization, we propose a new regularization method {\u2013} shift consistency regularization (SCR) {\u2013} to steer the same-class latent features of both natural and adversarial data into a common direction during adversarial training. The effectiveness of SCR in adversarial training is evaluated through extensive experiments over different datasets, such as CIFAR-10, CIFAR-100, and SVHN, against several competitive methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21p/zhang21p.pdf", "supp": "", "pdf_size": 3092539, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9979052837683231258&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": ";;;;;", "aff_domain": ";;;;;", "email": ";;;;;", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/zhang21p.html" }, { "title": "Towards Certifying L-infinity Robustness using Neural Networks with L-inf-dist Neurons", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9827", "id": "9827", "proceeding": "http://proceedings.mlr.press/v139/zhang21b.html", "slides": "/media/icml-2021/Slides/9827.pdf", "author_site": "Bohang Zhang, Tianle Cai, Zhou Lu, Di He, Liwei Wang", "author": "Bohang Zhang; Tianle Cai; Zhou Lu; Di He; Liwei Wang", "abstract": "It is well-known that standard neural networks, even with a high classification accuracy, are vulnerable to small $\\ell_\\infty$-norm bounded adversarial perturbations. 
Although many attempts have been made, most previous works either can only provide empirical verification of the defense to a particular attack method, or can only develop a certified guarantee of the model robustness in limited scenarios. In this paper, we seek for a new approach to develop a theoretically principled neural network that inherently resists $\\ell_\\infty$ perturbations. In particular, we design a novel neuron that uses $\\ell_\\infty$-distance as its basic operation (which we call $\\ell_\\infty$-dist neuron), and show that any neural network constructed with $\\ell_\\infty$-dist neurons (called $\\ell_{\\infty}$-dist net) is naturally a 1-Lipschitz function with respect to $\\ell_\\infty$-norm. This directly provides a rigorous guarantee of the certified robustness based on the margin of prediction outputs. We then prove that such networks have enough expressive power to approximate any 1-Lipschitz function with robust generalization guarantee. We further provide a holistic training strategy that can greatly alleviate optimization difficulties. Experimental results show that using $\\ell_{\\infty}$-dist nets as basic building blocks, we consistently achieve state-of-the-art performance on commonly used datasets: 93.09% certified accuracy on MNIST ($\\epsilon=0.3$), 35.42% on CIFAR-10 ($\\epsilon=8/255$) and 16.31% on TinyImageNet ($\\epsilon=1/255$).", "bibtex": "@InProceedings{pmlr-v139-zhang21b,\n title = \t {Towards Certifying L-infinity Robustness using Neural Networks with L-inf-dist Neurons},\n author = {Zhang, Bohang and Cai, Tianle and Lu, Zhou and He, Di and Wang, Liwei},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12368--12379},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21b/zhang21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21b.html},\n abstract = \t {It is well-known that standard neural networks, even with a high classification accuracy, are vulnerable to small $\\ell_\\infty$-norm bounded adversarial perturbations. Although many attempts have been made, most previous works either can only provide empirical verification of the defense to a particular attack method, or can only develop a certified guarantee of the model robustness in limited scenarios. In this paper, we seek for a new approach to develop a theoretically principled neural network that inherently resists $\\ell_\\infty$ perturbations. In particular, we design a novel neuron that uses $\\ell_\\infty$-distance as its basic operation (which we call $\\ell_\\infty$-dist neuron), and show that any neural network constructed with $\\ell_\\infty$-dist neurons (called $\\ell_{\\infty}$-dist net) is naturally a 1-Lipschitz function with respect to $\\ell_\\infty$-norm. This directly provides a rigorous guarantee of the certified robustness based on the margin of prediction outputs. We then prove that such networks have enough expressive power to approximate any 1-Lipschitz function with robust generalization guarantee. We further provide a holistic training strategy that can greatly alleviate optimization difficulties. 
Experimental results show that using $\\ell_{\\infty}$-dist nets as basic building blocks, we consistently achieve state-of-the-art performance on commonly used datasets: 93.09% certified accuracy on MNIST ($\\epsilon=0.3$), 35.42% on CIFAR-10 ($\\epsilon=8/255$) and 16.31% on TinyImageNet ($\\epsilon=1/255$).}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21b/zhang21b.pdf", "supp": "", "pdf_size": 906383, "gs_citation": 70, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6201420149183682924&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Key Laboratory of Machine Perception, MOE, School of EECS, Peking University; Department of Electrical and Computer Engineering, Princeton University + Zhongguancun Haihua Institute for Frontier Information Technology; Department of Computer Science, Princeton University; Microsoft Research; Center for Data Science, Peking University", "aff_domain": "cis.pku.edu.cn; ; ; ; ", "email": "cis.pku.edu.cn; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/zhang21b.html", "aff_unique_index": "0;1+2;1;3;0", "aff_unique_norm": "Peking University;Princeton University;Zhongguancun Haihua Institute for Frontier Information Technology;Microsoft", "aff_unique_dep": "School of EECS;Department of Electrical and Computer Engineering;Institute for Frontier Information Technology;Microsoft Research", "aff_unique_url": "http://www.pku.edu.cn;https://www.princeton.edu;;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "Peking U;Princeton;;MSR", "aff_campus_unique_index": ";1", "aff_campus_unique": ";Beijing", "aff_country_unique_index": "0;1+0;1;1;0", "aff_country_unique": "China;United States" }, { "title": "Towards Defending against Adversarial Examples via Attack-Invariant Features", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8629", "id": "8629", "proceeding": "http://proceedings.mlr.press/v139/zhou21e.html", "slides": "", "author_site": "Dawei Zhou, Tongliang Liu, Bo Han, Nannan Wang, Chunlei Peng, Xinbo Gao", "author": "Dawei Zhou; Tongliang Liu; Bo Han; Nannan Wang; Chunlei Peng; Xinbo Gao", "abstract": "Deep neural networks (DNNs) are vulnerable to adversarial noise. Their adversarial robustness can be improved by exploiting adversarial examples. However, given the continuously evolving attacks, models trained on seen types of adversarial examples generally cannot generalize well to unseen types of adversarial examples. To solve this problem, in this paper, we propose to remove adversarial noise by learning generalizable invariant features across attacks which maintain semantic classification information. Specifically, we introduce an adversarial feature learning mechanism to disentangle invariant features from adversarial noise. A normalization term has been proposed in the encoded space of the attack-invariant features to address the bias issue between the seen and unseen types of attacks. 
Empirical evaluations demonstrate that our method could provide better protection in comparison to previous state-of-the-art approaches, especially against unseen types of attacks and adaptive attacks.", "bibtex": "@InProceedings{pmlr-v139-zhou21e,\n title = \t {Towards Defending against Adversarial Examples via Attack-Invariant Features},\n author = {Zhou, Dawei and Liu, Tongliang and Han, Bo and Wang, Nannan and Peng, Chunlei and Gao, Xinbo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12835--12845},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhou21e/zhou21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhou21e.html},\n abstract = \t {Deep neural networks (DNNs) are vulnerable to adversarial noise. Their adversarial robustness can be improved by exploiting adversarial examples. However, given the continuously evolving attacks, models trained on seen types of adversarial examples generally cannot generalize well to unseen types of adversarial examples. To solve this problem, in this paper, we propose to remove adversarial noise by learning generalizable invariant features across attacks which maintain semantic classification information. Specifically, we introduce an adversarial feature learning mechanism to disentangle invariant features from adversarial noise. A normalization term has been proposed in the encoded space of the attack-invariant features to address the bias issue between the seen and unseen types of attacks. Empirical evaluations demonstrate that our method could provide better protection in comparison to previous state-of-the-art approaches, especially against unseen types of attacks and adaptive attacks.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhou21e/zhou21e.pdf", "supp": "", "pdf_size": 845894, "gs_citation": 63, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14479485330568063041&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "State Key Laboratory of Integrated Services Networks, School of Telecommunications Engineering, Xidian University; Trustworthy Machine Learning Lab, School of Computer Science, The University of Sydney; Department of Computer Science, Hong Kong Baptist University; State Key Laboratory of Integrated Services Networks, School of Cyber Engineering, Xidian University; State Key Laboratory of Integrated Services Networks, School of Cyber Engineering, Xidian University; Chongqing Key Laboratory of Image Cognition, Chongqing University of Posts and Telecommunications", "aff_domain": "xidian.edu.cn;sydney.edu.au;hkbu.edu.hk;xidian.edu.cn;xidian.edu.cn;cqupt.edu.cn", "email": "xidian.edu.cn;sydney.edu.au;hkbu.edu.hk;xidian.edu.cn;xidian.edu.cn;cqupt.edu.cn", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/zhou21e.html", "aff_unique_index": "0;1;2;0;0;3", "aff_unique_norm": "Xidian University;University of Sydney;Hong Kong Baptist University;Chongqing University of Posts and Telecommunications", "aff_unique_dep": "School of Telecommunications Engineering;School of Computer Science;Department of Computer Science;Chongqing Key Laboratory of Image Cognition", "aff_unique_url": "http://www.xidian.edu.cn/;https://www.sydney.edu.au;https://www.hkbu.edu.hk;", "aff_unique_abbr": "Xidian;USYD;HKBU;", "aff_campus_unique_index": 
"1", "aff_campus_unique": ";Hong Kong SAR", "aff_country_unique_index": "0;1;0;0;0;0", "aff_country_unique": "China;Australia" }, { "title": "Towards Distraction-Robust Active Visual Tracking", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8903", "id": "8903", "proceeding": "http://proceedings.mlr.press/v139/zhong21b.html", "slides": "", "author_site": "Fangwei Zhong, Peng Sun, Wenhan Luo, Tingyun Yan, Yizhou Wang", "author": "Fangwei Zhong; Peng Sun; Wenhan Luo; Tingyun Yan; Yizhou Wang", "abstract": "In active visual tracking, it is notoriously difficult when distracting objects appear, as distractors often mislead the tracker by occluding the target or bringing a confusing appearance. To address this issue, we propose a mixed cooperative-competitive multi-agent game, where a target and multiple distractors form a collaborative team to play against a tracker and make it fail to follow. Through learning in our game, diverse distracting behaviors of the distractors naturally emerge, thereby exposing the tracker\u2019s weakness, which helps enhance the distraction-robustness of the tracker. For effective learning, we then present a bunch of practical methods, including a reward function for distractors, a cross-modal teacher-student learning strategy, and a recurrent attention mechanism for the tracker. The experimental results show that our tracker performs desired distraction-robust active visual tracking and can be well generalized to unseen environments. We also show that the multi-agent game can be used to adversarially test the robustness of trackers.", "bibtex": "@InProceedings{pmlr-v139-zhong21b,\n title = \t {Towards Distraction-Robust Active Visual Tracking},\n author = {Zhong, Fangwei and Sun, Peng and Luo, Wenhan and Yan, Tingyun and Wang, Yizhou},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12782--12792},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhong21b/zhong21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhong21b.html},\n abstract = \t {In active visual tracking, it is notoriously difficult when distracting objects appear, as distractors often mislead the tracker by occluding the target or bringing a confusing appearance. To address this issue, we propose a mixed cooperative-competitive multi-agent game, where a target and multiple distractors form a collaborative team to play against a tracker and make it fail to follow. Through learning in our game, diverse distracting behaviors of the distractors naturally emerge, thereby exposing the tracker\u2019s weakness, which helps enhance the distraction-robustness of the tracker. For effective learning, we then present a bunch of practical methods, including a reward function for distractors, a cross-modal teacher-student learning strategy, and a recurrent attention mechanism for the tracker. The experimental results show that our tracker performs desired distraction-robust active visual tracking and can be well generalized to unseen environments. 
We also show that the multi-agent game can be used to adversarially test the robustness of trackers.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhong21b/zhong21b.pdf", "supp": "", "pdf_size": 2306762, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9464509045800143114&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Center on Frontiers of Computing Studies, Dept. of Computer Science, Peking University, Beijing, P.R. China; Tencent Robotics X, Shenzhen, P.R. China; Tencent, Shenzhen, P.R. China; Adv. Inst. of Info. Tech, Peking University, Hangzhou, P.R. China; Center on Frontiers of Computing Studies, Dept. of Computer Science, Peking University, Beijing, P.R. China", "aff_domain": "gmail.com; ; ; ;pku.edu.cn", "email": "gmail.com; ; ; ;pku.edu.cn", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/zhong21b.html", "aff_unique_index": "0;1;1;0;0", "aff_unique_norm": "Peking University;Tencent", "aff_unique_dep": "Dept. of Computer Science;Robotics X", "aff_unique_url": "http://www.pku.edu.cn;https://robotics.tencent.com", "aff_unique_abbr": "Peking U;Tencent Robotics X", "aff_campus_unique_index": "0;1;1;2;0", "aff_campus_unique": "Beijing;Shenzhen;Hangzhou", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "China" }, { "title": "Towards Domain-Agnostic Contrastive Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10693", "id": "10693", "proceeding": "http://proceedings.mlr.press/v139/verma21a.html", "slides": "/media/icml-2021/Slides/10693.pdf", "author_site": "Vikas Verma, Thang Luong, Kenji Kawaguchi, Hieu Pham, Quoc Le", "author": "Vikas Verma; Thang Luong; Kenji Kawaguchi; Hieu Pham; Quoc Le", "abstract": "Despite recent successes, most contrastive self-supervised learning methods are domain-specific, relying heavily on data augmentation techniques that require knowledge about a particular domain, such as image cropping and rotation. To overcome such limitation, we propose a domain-agnostic approach to contrastive learning, named DACL, that is applicable to problems where domain-specific data augmentations are not readily available. Key to our approach is the use of Mixup noise to create similar and dissimilar examples by mixing data samples differently either at the input or hidden-state levels. We theoretically analyze our method and show advantages over the Gaussian-noise based contrastive learning approach. To demonstrate the effectiveness of DACL, we conduct experiments across various domains such as tabular data, images, and graphs. 
Our results show that DACL not only outperforms other domain-agnostic noising methods, such as Gaussian-noise, but also combines well with domain-specific methods, such as SimCLR, to improve self-supervised visual representation learning.", "bibtex": "@InProceedings{pmlr-v139-verma21a,\n title = \t {Towards Domain-Agnostic Contrastive Learning},\n author = {Verma, Vikas and Luong, Thang and Kawaguchi, Kenji and Pham, Hieu and Le, Quoc},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10530--10541},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/verma21a/verma21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/verma21a.html},\n abstract = \t {Despite recent successes, most contrastive self-supervised learning methods are domain-specific, relying heavily on data augmentation techniques that require knowledge about a particular domain, such as image cropping and rotation. To overcome such limitation, we propose a domain-agnostic approach to contrastive learning, named DACL, that is applicable to problems where domain-specific data augmentations are not readily available. Key to our approach is the use of Mixup noise to create similar and dissimilar examples by mixing data samples differently either at the input or hidden-state levels. We theoretically analyze our method and show advantages over the Gaussian-noise based contrastive learning approach. To demonstrate the effectiveness of DACL, we conduct experiments across various domains such as tabular data, images, and graphs. Our results show that DACL not only outperforms other domain-agnostic noising methods, such as Gaussian-noise, but also combines well with domain-specific methods, such as SimCLR, to improve self-supervised visual representation learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/verma21a/verma21a.pdf", "supp": "", "pdf_size": 474134, "gs_citation": 149, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6601994961196588024&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Google Research, Brain Team + Aalto University, Finland; Google Research, Brain Team; Harvard University; Google Research, Brain Team; Google Research, Brain Team", "aff_domain": "aalto.fi;google.com;fas.harvard.edu;google.com;google.com", "email": "aalto.fi;google.com;fas.harvard.edu;google.com;google.com", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/verma21a.html", "aff_unique_index": "0+1;0;2;0;0", "aff_unique_norm": "Google;Aalto University;Harvard University", "aff_unique_dep": "Google Research;;", "aff_unique_url": "https://research.google;https://www.aalto.fi;https://www.harvard.edu", "aff_unique_abbr": "Google;Aalto;Harvard", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Mountain View;", "aff_country_unique_index": "0+1;0;0;0;0", "aff_country_unique": "United States;Finland" }, { "title": "Towards Open Ad Hoc Teamwork Using Graph-based Policy Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10713", "id": "10713", "proceeding": "http://proceedings.mlr.press/v139/rahman21a.html", "slides": "/media/icml-2021/Slides/10713.pdf", "author_site": "Muhammad Arrasy Rahman, Niklas Hopner, Filippos Christianos, Stefano V. 
Albrecht", "author": "Muhammad A Rahman; Niklas Hopner; Filippos Christianos; Stefano V Albrecht", "abstract": "Ad hoc teamwork is the challenging problem of designing an autonomous agent which can adapt quickly to collaborate with teammates without prior coordination mechanisms, including joint training. Prior work in this area has focused on closed teams in which the number of agents is fixed. In this work, we consider open teams by allowing agents with different fixed policies to enter and leave the environment without prior notification. Our solution builds on graph neural networks to learn agent models and joint-action value models under varying team compositions. We contribute a novel action-value computation that integrates the agent model and joint-action value model to produce action-value estimates. We empirically demonstrate that our approach successfully models the effects other agents have on the learner, leading to policies that robustly adapt to dynamic team compositions and significantly outperform several alternative methods.", "bibtex": "@InProceedings{pmlr-v139-rahman21a,\n title = \t {Towards Open Ad Hoc Teamwork Using Graph-based Policy Learning},\n author = {Rahman, Muhammad A and Hopner, Niklas and Christianos, Filippos and Albrecht, Stefano V},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8776--8786},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/rahman21a/rahman21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/rahman21a.html},\n abstract = \t {Ad hoc teamwork is the challenging problem of designing an autonomous agent which can adapt quickly to collaborate with teammates without prior coordination mechanisms, including joint training. Prior work in this area has focused on closed teams in which the number of agents is fixed. In this work, we consider open teams by allowing agents with different fixed policies to enter and leave the environment without prior notification. Our solution builds on graph neural networks to learn agent models and joint-action value models under varying team compositions. We contribute a novel action-value computation that integrates the agent model and joint-action value model to produce action-value estimates. 
We empirically demonstrate that our approach successfully models the effects other agents have on the learner, leading to policies that robustly adapt to dynamic team compositions and significantly outperform several alternative methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/rahman21a/rahman21a.pdf", "supp": "", "pdf_size": 751590, "gs_citation": 66, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13446293545265914898&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "School of Informatics, University of Edinburgh, Edinburgh, United Kingdom+University of Amsterdam, Amsterdam, Netherlands; University of Amsterdam, Amsterdam, Netherlands; School of Informatics, University of Edinburgh, Edinburgh, United Kingdom; School of Informatics, University of Edinburgh, Edinburgh, United Kingdom", "aff_domain": "ed.ac.uk; ; ; ", "email": "ed.ac.uk; ; ; ", "github": "https://github.com/uoe-agents/GPL", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/rahman21a.html", "aff_unique_index": "0+1;1;0;0", "aff_unique_norm": "University of Edinburgh;University of Amsterdam", "aff_unique_dep": "School of Informatics;", "aff_unique_url": "https://www.ed.ac.uk;https://www.uva.nl", "aff_unique_abbr": "Edinburgh;UvA", "aff_campus_unique_index": "0+1;1;0;0", "aff_campus_unique": "Edinburgh;Amsterdam", "aff_country_unique_index": "0+1;1;0;0", "aff_country_unique": "United Kingdom;Netherlands" }, { "title": "Towards Open-World Recommendation: An Inductive Model-based Collaborative Filtering Approach", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8799", "id": "8799", "proceeding": "http://proceedings.mlr.press/v139/wu21j.html", "slides": "", "author_site": "Qitian Wu, Hengrui Zhang, Xiaofeng Gao, Junchi Yan, Hongyuan Zha", "author": "Qitian Wu; Hengrui Zhang; Xiaofeng Gao; Junchi Yan; Hongyuan Zha", "abstract": "Recommendation models can effectively estimate underlying user interests and predict one\u2019s future behaviors by factorizing an observed user-item rating matrix into products of two sets of latent factors. However, the user-specific embedding factors can only be learned in a transductive way, making it difficult to handle new users on-the-fly. In this paper, we propose an inductive collaborative filtering framework that contains two representation models. The first model follows conventional matrix factorization which factorizes a group of key users\u2019 rating matrix to obtain meta latents. The second model resorts to attention-based structure learning that estimates hidden relations from query to key users and learns to leverage meta latents to inductively compute embeddings for query users via neural message passing. Our model enables inductive representation learning for users and meanwhile guarantees equivalent representation capacity as matrix factorization. 
Experiments demonstrate that our model achieves promising results for recommendation on few-shot users with limited training ratings and new unseen users which are commonly encountered in open-world recommender systems.", "bibtex": "@InProceedings{pmlr-v139-wu21j,\n title = \t {Towards Open-World Recommendation: An Inductive Model-based Collaborative Filtering Approach},\n author = {Wu, Qitian and Zhang, Hengrui and Gao, Xiaofeng and Yan, Junchi and Zha, Hongyuan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11329--11339},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wu21j/wu21j.pdf},\n url = \t {https://proceedings.mlr.press/v139/wu21j.html},\n abstract = \t {Recommendation models can effectively estimate underlying user interests and predict one\u2019s future behaviors by factorizing an observed user-item rating matrix into products of two sets of latent factors. However, the user-specific embedding factors can only be learned in a transductive way, making it difficult to handle new users on-the-fly. In this paper, we propose an inductive collaborative filtering framework that contains two representation models. The first model follows conventional matrix factorization which factorizes a group of key users\u2019 rating matrix to obtain meta latents. The second model resorts to attention-based structure learning that estimates hidden relations from query to key users and learns to leverage meta latents to inductively compute embeddings for query users via neural message passing. Our model enables inductive representation learning for users and meanwhile guarantees equivalent representation capacity as matrix factorization. 
Experiments demonstrate that our model achieves promising results for recommendation on few-shot users with limited training ratings and new unseen users which are commonly encountered in open-world recommender systems.}\n}", "pdf": "http://proceedings.mlr.press/v139/wu21j/wu21j.pdf", "supp": "", "pdf_size": 1072915, "gs_citation": 57, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13656226067206698249&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science and Engineering, Shanghai Jiao Tong University + MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University; Department of Computer Science and Engineering, Shanghai Jiao Tong University + MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University; Department of Computer Science and Engineering, Shanghai Jiao Tong University + MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University; Department of Computer Science and Engineering, Shanghai Jiao Tong University + MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University; School of Data Science, Shenzhen Institute of Artificial Intelligence and Robotics for Society, The Chinese University of Hong Kong, Shenzhen", "aff_domain": "cs.sjtu.edu.cn; ; ; ; ", "email": "cs.sjtu.edu.cn; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/wu21j.html", "aff_unique_index": "0+0;0+0;0+0;0+0;1", "aff_unique_norm": "Shanghai Jiao Tong University;Chinese University of Hong Kong, Shenzhen", "aff_unique_dep": "Department of Computer Science and Engineering;School of Data Science", "aff_unique_url": "https://www.sjtu.edu.cn;https://www.siat.ac.cn", "aff_unique_abbr": "SJTU;CUHK(SZ)", "aff_campus_unique_index": "1;1;1;1;2", "aff_campus_unique": ";Shanghai;Shenzhen", "aff_country_unique_index": "0+0;0+0;0+0;0+0;0", "aff_country_unique": "China" }, { "title": "Towards Practical Mean Bounds for Small Samples", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8979", "id": "8979", "proceeding": "http://proceedings.mlr.press/v139/phan21a.html", "slides": "/media/icml-2021/Slides/8979_iMuoueu.pdf", "author_site": "My Phan, Philip Thomas, Erik Learned-Miller", "author": "My Phan; Philip Thomas; Erik Learned-Miller", "abstract": "Historically, to bound the mean for small sample sizes, practitioners have had to choose between using methods with unrealistic assumptions about the unknown distribution (e.g., Gaussianity) and methods like Hoeffding\u2019s inequality that use weaker assumptions but produce much looser (wider) intervals. In 1969, \\citet{Anderson1969} proposed a mean confidence interval strictly better than or equal to Hoeffding\u2019s whose only assumption is that the distribution\u2019s support is contained in an interval $[a,b]$. For the first time since then, we present a new family of bounds that compares favorably to Anderson\u2019s. We prove that each bound in the family has {\\em guaranteed coverage}, i.e., it holds with probability at least $1-\\alpha$ for all distributions on an interval $[a,b]$. Furthermore, one of the bounds is tighter than or equal to Anderson\u2019s for all samples. 
In simulations, we show that for many distributions, the gain over Anderson\u2019s bound is substantial.", "bibtex": "@InProceedings{pmlr-v139-phan21a,\n title = \t {Towards Practical Mean Bounds for Small Samples},\n author = {Phan, My and Thomas, Philip and Learned-Miller, Erik},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8567--8576},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/phan21a/phan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/phan21a.html},\n abstract = \t {Historically, to bound the mean for small sample sizes, practitioners have had to choose between using methods with unrealistic assumptions about the unknown distribution (e.g., Gaussianity) and methods like Hoeffding\u2019s inequality that use weaker assumptions but produce much looser (wider) intervals. In 1969, \\citet{Anderson1969} proposed a mean confidence interval strictly better than or equal to Hoeffding\u2019s whose only assumption is that the distribution\u2019s support is contained in an interval $[a,b]$. For the first time since then, we present a new family of bounds that compares favorably to Anderson\u2019s. We prove that each bound in the family has {\\em guaranteed coverage}, i.e., it holds with probability at least $1-\\alpha$ for all distributions on an interval $[a,b]$. Furthermore, one of the bounds is tighter than or equal to Anderson\u2019s for all samples. In simulations, we show that for many distributions, the gain over Anderson\u2019s bound is substantial.}\n}", "pdf": "http://proceedings.mlr.press/v139/phan21a/phan21a.pdf", "supp": "", "pdf_size": 1321458, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=108164015875257038&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "College of Information and Computer Sciences, University of Massachusetts, Amherst, MA, USA; College of Information and Computer Sciences, University of Massachusetts, Amherst, MA, USA; College of Information and Computer Sciences, University of Massachusetts, Amherst, MA, USA", "aff_domain": "cs.umass.edu; ; ", "email": "cs.umass.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/phan21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Massachusetts Amherst", "aff_unique_dep": "College of Information and Computer Sciences", "aff_unique_url": "https://www.umass.edu", "aff_unique_abbr": "UMass Amherst", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Amherst", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Towards Rigorous Interpretations: a Formalisation of Feature Attribution", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10687", "id": "10687", "proceeding": "http://proceedings.mlr.press/v139/afchar21a.html", "slides": "/media/icml-2021/Slides/10687.pdf", "author_site": "Darius Afchar, Vincent Guigue, Romain Hennequin", "author": "Darius Afchar; Vincent Guigue; Romain Hennequin", "abstract": "Feature attribution is often loosely presented as the process of selecting a subset of relevant features as a rationale of a prediction. Task-dependent by nature, precise definitions of \"relevance\" encountered in the literature are however not always consistent. 
This lack of clarity stems from the fact that we usually do not have access to any notion of ground-truth attribution and from a more general debate on what good interpretations are. In this paper we propose to formalise feature selection/attribution based on the concept of relaxed functional dependence. In particular, we extend our notions to the instance-wise setting and derive necessary properties for candidate selection solutions, while leaving room for task-dependence. By computing ground-truth attributions on synthetic datasets, we evaluate many state-of-the-art attribution methods and show that, even when optimised, some fail to verify the proposed properties and provide wrong solutions.", "bibtex": "@InProceedings{pmlr-v139-afchar21a,\n title = \t {Towards Rigorous Interpretations: a Formalisation of Feature Attribution},\n author = {Afchar, Darius and Guigue, Vincent and Hennequin, Romain},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {76--86},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/afchar21a/afchar21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/afchar21a.html},\n abstract = \t {Feature attribution is often loosely presented as the process of selecting a subset of relevant features as a rationale of a prediction. Task-dependent by nature, precise definitions of \"relevance\" encountered in the literature are however not always consistent. This lack of clarity stems from the fact that we usually do not have access to any notion of ground-truth attribution and from a more general debate on what good interpretations are. In this paper we propose to formalise feature selection/attribution based on the concept of relaxed functional dependence. In particular, we extend our notions to the instance-wise setting and derive necessary properties for candidate selection solutions, while leaving room for task-dependence. 
By computing ground-truth attributions on synthetic datasets, we evaluate many state-of-the-art attribution methods and show that, even when optimised, some fail to verify the proposed properties and provide wrong solutions.}\n}", "pdf": "http://proceedings.mlr.press/v139/afchar21a/afchar21a.pdf", "supp": "", "pdf_size": 994249, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6443235161573305083&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Deezer Research, Paris, France+LIP6, Paris, France; Deezer Research, Paris, France; LIP6, Paris, France", "aff_domain": "deezer.com; ; ", "email": "deezer.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/afchar21a.html", "aff_unique_index": "0+1;0;1", "aff_unique_norm": "Deezer Research;Laboratoire d'Informatique de Paris 6", "aff_unique_dep": "Research;LIP6", "aff_unique_url": "https://www.deezer.com;https://www.lip6.fr", "aff_unique_abbr": ";LIP6", "aff_campus_unique_index": "0+0;0;0", "aff_campus_unique": "Paris", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "France" }, { "title": "Towards Tight Bounds on the Sample Complexity of Average-reward MDPs", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9781", "id": "9781", "proceeding": "http://proceedings.mlr.press/v139/jin21b.html", "slides": "/media/icml-2021/Slides/9781.pdf", "author_site": "Yujia Jin, Aaron Sidford", "author": "Yujia Jin; Aaron Sidford", "abstract": "We prove new upper and lower bounds for sample complexity of finding an $\\epsilon$-optimal policy of an infinite-horizon average-reward Markov decision process (MDP) given access to a generative model. When the mixing time of the probability transition matrix of all policies is at most $t_\\mathrm{mix}$, we provide an algorithm that solves the problem using $\\widetilde{O}(t_\\mathrm{mix} \\epsilon^{-3})$ (oblivious) samples per state-action pair. Further, we provide a lower bound showing that a linear dependence on $t_\\mathrm{mix}$ is necessary in the worst case for any algorithm which computes oblivious samples. We obtain our results by establishing connections between infinite-horizon average-reward MDPs and discounted MDPs of possible further utility.", "bibtex": "@InProceedings{pmlr-v139-jin21b,\n title = \t {Towards Tight Bounds on the Sample Complexity of Average-reward MDPs},\n author = {Jin, Yujia and Sidford, Aaron},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5055--5064},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/jin21b/jin21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/jin21b.html},\n abstract = \t {We prove new upper and lower bounds for sample complexity of finding an $\\epsilon$-optimal policy of an infinite-horizon average-reward Markov decision process (MDP) given access to a generative model. When the mixing time of the probability transition matrix of all policies is at most $t_\\mathrm{mix}$, we provide an algorithm that solves the problem using $\\widetilde{O}(t_\\mathrm{mix} \\epsilon^{-3})$ (oblivious) samples per state-action pair. Further, we provide a lower bound showing that a linear dependence on $t_\\mathrm{mix}$ is necessary in the worst case for any algorithm which computes oblivious samples. 
We obtain our results by establishing connections between infinite-horizon average-reward MDPs and discounted MDPs of possible further utility.}\n}", "pdf": "http://proceedings.mlr.press/v139/jin21b/jin21b.pdf", "supp": "", "pdf_size": 1974097, "gs_citation": 34, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2343158228804593771&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Management Science and Engineering, Stanford University, CA, United States; Management Science and Engineering, Stanford University, CA, United States", "aff_domain": "stanford.edu; ", "email": "stanford.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/jin21b.html", "aff_unique_index": "0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Management Science and Engineering", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Towards Understanding Learning in Neural Networks with Linear Teachers", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9639", "id": "9639", "proceeding": "http://proceedings.mlr.press/v139/sarussi21a.html", "slides": "", "author_site": "Roei Sarussi, Alon Brutzkus, Amir Globerson", "author": "Roei Sarussi; Alon Brutzkus; Amir Globerson", "abstract": "Can a neural network minimizing cross-entropy learn linearly separable data? Despite progress in the theory of deep learning, this question remains unsolved. Here we prove that SGD globally optimizes this learning problem for a two-layer network with Leaky ReLU activations. The learned network can in principle be very complex. However, empirical evidence suggests that it often turns out to be approximately linear. We provide theoretical support for this phenomenon by proving that if network weights converge to two weight clusters, this will imply an approximately linear decision boundary. Finally, we show a condition on the optimization that leads to weight clustering. We provide empirical results that validate our theoretical analysis.", "bibtex": "@InProceedings{pmlr-v139-sarussi21a,\n title = \t {Towards Understanding Learning in Neural Networks with Linear Teachers},\n author = {Sarussi, Roei and Brutzkus, Alon and Globerson, Amir},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9313--9322},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sarussi21a/sarussi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/sarussi21a.html},\n abstract = \t {Can a neural network minimizing cross-entropy learn linearly separable data? Despite progress in the theory of deep learning, this question remains unsolved. Here we prove that SGD globally optimizes this learning problem for a two-layer network with Leaky ReLU activations. The learned network can in principle be very complex. However, empirical evidence suggests that it often turns out to be approximately linear. We provide theoretical support for this phenomenon by proving that if network weights converge to two weight clusters, this will imply an approximately linear decision boundary. 
Finally, we show a condition on the optimization that leads to weight clustering. We provide empirical results that validate our theoretical analysis.}\n}", "pdf": "http://proceedings.mlr.press/v139/sarussi21a/sarussi21a.pdf", "supp": "", "pdf_size": 608916, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4579949070656958862&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "The Blavatnik School of Computer Science, Tel Aviv University; The Blavatnik School of Computer Science, Tel Aviv University; The Blavatnik School of Computer Science, Tel Aviv University", "aff_domain": " Roei Sarussi;mail.tau.ac.il; Amir Globerson", "email": " Roei Sarussi;mail.tau.ac.il; Amir Globerson", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/sarussi21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Tel Aviv University", "aff_unique_dep": "Blavatnik School of Computer Science", "aff_unique_url": "https://www.tau.ac.il", "aff_unique_abbr": "TAU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Tel Aviv", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Israel" }, { "title": "Towards Understanding and Mitigating Social Biases in Language Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9611", "id": "9611", "proceeding": "http://proceedings.mlr.press/v139/liang21a.html", "slides": "/media/icml-2021/Slides/9611.pdf", "author_site": "Paul Liang, Chiyu Wu, Louis-Philippe Morency, Ruslan Salakhutdinov", "author": "Paul Pu Liang; Chiyu Wu; Louis-Philippe Morency; Ruslan Salakhutdinov", "abstract": "As machine learning methods are deployed in real-world settings such as healthcare, legal systems, and social science, it is crucial to recognize how they shape social biases and stereotypes in these sensitive decision-making processes. Among such real-world deployments are large-scale pretrained language models (LMs) that can be potentially dangerous in manifesting undesirable representational biases - harmful biases resulting from stereotyping that propagate negative generalizations involving gender, race, religion, and other social constructs. As a step towards improving the fairness of LMs, we carefully define several sources of representational biases before proposing new benchmarks and metrics to measure them. With these tools, we propose steps towards mitigating social biases during text generation. 
Our empirical results and human evaluation demonstrate effectiveness in mitigating bias while retaining crucial contextual information for high-fidelity text generation, thereby pushing forward the performance-fairness Pareto frontier.", "bibtex": "@InProceedings{pmlr-v139-liang21a,\n title = \t {Towards Understanding and Mitigating Social Biases in Language Models},\n author = {Liang, Paul Pu and Wu, Chiyu and Morency, Louis-Philippe and Salakhutdinov, Ruslan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6565--6576},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liang21a/liang21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/liang21a.html},\n abstract = \t {As machine learning methods are deployed in real-world settings such as healthcare, legal systems, and social science, it is crucial to recognize how they shape social biases and stereotypes in these sensitive decision-making processes. Among such real-world deployments are large-scale pretrained language models (LMs) that can be potentially dangerous in manifesting undesirable representational biases - harmful biases resulting from stereotyping that propagate negative generalizations involving gender, race, religion, and other social constructs. As a step towards improving the fairness of LMs, we carefully define several sources of representational biases before proposing new benchmarks and metrics to measure them. With these tools, we propose steps towards mitigating social biases during text generation. Our empirical results and human evaluation demonstrate effectiveness in mitigating bias while retaining crucial contextual information for high-fidelity text generation, thereby pushing forward the performance-fairness Pareto frontier.}\n}", "pdf": "http://proceedings.mlr.press/v139/liang21a/liang21a.pdf", "supp": "", "pdf_size": 986449, "gs_citation": 490, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16764320017418997560&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Carnegie Mellon University; Carnegie Mellon University; Carnegie Mellon University; Carnegie Mellon University", "aff_domain": "cs.cmu.edu; ; ; ", "email": "cs.cmu.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/liang21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Towards the Unification and Robustness of Perturbation and Gradient Based Explanations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9991", "id": "9991", "proceeding": "http://proceedings.mlr.press/v139/agarwal21c.html", "slides": "", "author_site": "Sushant Agarwal, Shahin Jabbari, Chirag Agarwal, Sohini Upadhyay, Steven Wu, Hima Lakkaraju", "author": "Sushant Agarwal; Shahin Jabbari; Chirag Agarwal; Sohini Upadhyay; Steven Wu; Himabindu Lakkaraju", "abstract": "As machine learning black boxes are increasingly being deployed in critical domains such as healthcare and criminal justice, there has been a growing emphasis on developing techniques for 
explaining these black boxes in a post hoc manner. In this work, we analyze two popular post hoc interpretation techniques: SmoothGrad which is a gradient based method, and a variant of LIME which is a perturbation based method. More specifically, we derive explicit closed form expressions for the explanations output by these two methods and show that they both converge to the same explanation in expectation, i.e., when the number of perturbed samples used by these methods is large. We then leverage this connection to establish other desirable properties, such as robustness, for these techniques. We also derive finite sample complexity bounds for the number of perturbations required for these methods to converge to their expected explanation. Finally, we empirically validate our theory using extensive experimentation on both synthetic and real-world datasets.", "bibtex": "@InProceedings{pmlr-v139-agarwal21c,\n title = \t {Towards the Unification and Robustness of Perturbation and Gradient Based Explanations},\n author = {Agarwal, Sushant and Jabbari, Shahin and Agarwal, Chirag and Upadhyay, Sohini and Wu, Steven and Lakkaraju, Himabindu},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {110--119},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/agarwal21c/agarwal21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/agarwal21c.html},\n abstract = \t {As machine learning black boxes are increasingly being deployed in critical domains such as healthcare and criminal justice, there has been a growing emphasis on developing techniques for explaining these black boxes in a post hoc manner. In this work, we analyze two popular post hoc interpretation techniques: SmoothGrad which is a gradient based method, and a variant of LIME which is a perturbation based method. More specifically, we derive explicit closed form expressions for the explanations output by these two methods and show that they both converge to the same explanation in expectation, i.e., when the number of perturbed samples used by these methods is large. We then leverage this connection to establish other desirable properties, such as robustness, for these techniques. We also derive finite sample complexity bounds for the number of perturbations required for these methods to converge to their expected explanation. Finally, we empirically validate our theory using extensive experimentation on both synthetic and real-world datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/agarwal21c/agarwal21c.pdf", "supp": "", "pdf_size": 1464187, "gs_citation": 82, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7496907784766567632&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "David R. 
Cheriton School of Computer Science, University of Waterloo; Department of Computer Science, Harvard University; Department of Computer Science, Harvard University; Department of Computer Science, Harvard University; School of Computer Science, Carnegie Mellon University; Department of Computer Science, Harvard University", "aff_domain": "uwaterloo.ca;seas.harvard.edu; ; ;cs.cmu.edu; ", "email": "uwaterloo.ca;seas.harvard.edu; ; ;cs.cmu.edu; ", "github": "", "project": "https://arxiv.org/abs/2102.10618", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/agarwal21c.html", "aff_unique_index": "0;1;1;1;2;1", "aff_unique_norm": "University of Waterloo;Harvard University;Carnegie Mellon University", "aff_unique_dep": "David R. Cheriton School of Computer Science;Department of Computer Science;School of Computer Science", "aff_unique_url": "https://uwaterloo.ca;https://www.harvard.edu;https://www.cmu.edu", "aff_unique_abbr": "UWaterloo;Harvard;CMU", "aff_campus_unique_index": "1;1;1;2;1", "aff_campus_unique": ";Cambridge;Pittsburgh", "aff_country_unique_index": "0;1;1;1;1;1", "aff_country_unique": "Canada;United States" }, { "title": "Tractable structured natural-gradient descent using local parameterizations", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9733", "id": "9733", "proceeding": "http://proceedings.mlr.press/v139/lin21e.html", "slides": "/media/icml-2021/Slides/9733_RBWzL5r.pdf", "author_site": "Wu Lin, Frank Nielsen, Khan Emtiyaz, Mark Schmidt", "author": "Wu Lin; Frank Nielsen; Khan Mohammad Emtiyaz; Mark Schmidt", "abstract": "Natural-gradient descent (NGD) on structured parameter spaces (e.g., low-rank covariances) is computationally challenging due to difficult Fisher-matrix computations. We address this issue by using \\emph{local-parameter coordinates} to obtain a flexible and efficient NGD method that works well for a wide-variety of structured parameterizations. We show four applications where our method (1) generalizes the exponential natural evolutionary strategy, (2) recovers existing Newton-like algorithms, (3) yields new structured second-order algorithms, and (4) gives new algorithms to learn covariances of Gaussian and Wishart-based distributions. We show results on a range of problems from deep learning, variational inference, and evolution strategies. Our work opens a new direction for scalable structured geometric methods.", "bibtex": "@InProceedings{pmlr-v139-lin21e,\n title = \t {Tractable structured natural-gradient descent using local parameterizations},\n author = {Lin, Wu and Nielsen, Frank and Emtiyaz, Khan Mohammad and Schmidt, Mark},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6680--6691},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lin21e/lin21e.pdf},\n url = \t {https://proceedings.mlr.press/v139/lin21e.html},\n abstract = \t {Natural-gradient descent (NGD) on structured parameter spaces (e.g., low-rank covariances) is computationally challenging due to difficult Fisher-matrix computations. We address this issue by using \\emph{local-parameter coordinates} to obtain a flexible and efficient NGD method that works well for a wide-variety of structured parameterizations. 
We show four applications where our method (1) generalizes the exponential natural evolutionary strategy, (2) recovers existing Newton-like algorithms, (3) yields new structured second-order algorithms, and (4) gives new algorithms to learn covariances of Gaussian and Wishart-based distributions. We show results on a range of problems from deep learning, variational inference, and evolution strategies. Our work opens a new direction for scalable structured geometric methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/lin21e/lin21e.pdf", "supp": "", "pdf_size": 2825812, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6372541937312421502&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "University of British Columbia; Sony Computer Science Laboratories Inc.; RIKEN Center for Advanced Intelligence Project; CIFAR AI Chair, Alberta Machine Intelligence Institute", "aff_domain": "gmail.com; ; ; ", "email": "gmail.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/lin21e.html", "aff_unique_index": "0;1;2;3", "aff_unique_norm": "University of British Columbia;Sony Computer Science Laboratories Inc.;RIKEN;Alberta Machine Intelligence Institute", "aff_unique_dep": ";;Center for Advanced Intelligence Project;CIFAR AI Chair", "aff_unique_url": "https://www.ubc.ca;https://www.sony.net/SCL/;https://www.riken.jp/en/;https://www.ami.alberta.ca", "aff_unique_abbr": "UBC;SCL;RIKEN;AMII", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;1;0", "aff_country_unique": "Canada;Japan" }, { "title": "Train simultaneously, generalize better: Stability of gradient-based minimax learners", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10111", "id": "10111", "proceeding": "http://proceedings.mlr.press/v139/farnia21a.html", "slides": "/media/icml-2021/Slides/10111.pdf", "author_site": "Farzan Farnia, Asuman Ozdaglar", "author": "Farzan Farnia; Asuman Ozdaglar", "abstract": "The success of minimax learning problems of generative adversarial networks (GANs) has been observed to depend on the minimax optimization algorithm used for their training. This dependence is commonly attributed to the convergence speed and robustness properties of the underlying optimization algorithm. In this paper, we show that the optimization algorithm also plays a key role in the generalization performance of the trained minimax model. To this end, we analyze the generalization properties of standard gradient descent ascent (GDA) and proximal point method (PPM) algorithms through the lens of algorithmic stability as defined by Bousquet & Elisseeff, 2002 under both convex-concave and nonconvex-nonconcave minimax settings. While the GDA algorithm is not guaranteed to have a vanishing excess risk in convex-concave problems, we show the PPM algorithm enjoys a bounded excess risk in the same setup. For nonconvex-nonconcave problems, we compare the generalization performance of stochastic GDA and GDmax algorithms where the latter fully solves the maximization subproblem at every iteration. Our generalization analysis suggests the superiority of GDA provided that the minimization and maximization subproblems are solved simultaneously with similar learning rates. 
We discuss several numerical results indicating the role of optimization algorithms in the generalization of learned minimax models.", "bibtex": "@InProceedings{pmlr-v139-farnia21a,\n title = \t {Train simultaneously, generalize better: Stability of gradient-based minimax learners},\n author = {Farnia, Farzan and Ozdaglar, Asuman},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3174--3185},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/farnia21a/farnia21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/farnia21a.html},\n abstract = \t {The success of minimax learning problems of generative adversarial networks (GANs) has been observed to depend on the minimax optimization algorithm used for their training. This dependence is commonly attributed to the convergence speed and robustness properties of the underlying optimization algorithm. In this paper, we show that the optimization algorithm also plays a key role in the generalization performance of the trained minimax model. To this end, we analyze the generalization properties of standard gradient descent ascent (GDA) and proximal point method (PPM) algorithms through the lens of algorithmic stability as defined by Bousquet & Elisseeff, 2002 under both convex-concave and nonconvex-nonconcave minimax settings. While the GDA algorithm is not guaranteed to have a vanishing excess risk in convex-concave problems, we show the PPM algorithm enjoys a bounded excess risk in the same setup. For nonconvex-nonconcave problems, we compare the generalization performance of stochastic GDA and GDmax algorithms where the latter fully solves the maximization subproblem at every iteration. Our generalization analysis suggests the superiority of GDA provided that the minimization and maximization subproblems are solved simultaneously with similar learning rates. 
We discuss several numerical results indicating the role of optimization algorithms in the generalization of learned minimax models.}\n}", "pdf": "http://proceedings.mlr.press/v139/farnia21a/farnia21a.pdf", "supp": "", "pdf_size": 1074522, "gs_citation": 53, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12342149833797282733&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Laboratory for Information & Decision Systems, Massachusetts Institute of Technology, Cambridge, Massachusetts, USA; Laboratory for Information & Decision Systems, Massachusetts Institute of Technology, Cambridge, Massachusetts, USA", "aff_domain": "mit.edu;mit.edu", "email": "mit.edu;mit.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/farnia21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Laboratory for Information & Decision Systems", "aff_unique_url": "https://web.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Training Adversarially Robust Sparse Networks via Bayesian Connectivity Sampling", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8563", "id": "8563", "proceeding": "http://proceedings.mlr.press/v139/ozdenizci21a.html", "slides": "", "author_site": "Ozan \u00d6zdenizci, Robert Legenstein", "author": "Ozan \u00d6zdenizci; Robert Legenstein", "abstract": "Deep neural networks have been shown to be susceptible to adversarial attacks. This lack of adversarial robustness is even more pronounced when models are compressed in order to meet hardware limitations. Hence, if adversarial robustness is an issue, training of sparsely connected networks necessitates considering adversarially robust sparse learning. Motivated by the efficient and stable computational function of the brain in the presence of a highly dynamic synaptic connectivity structure, we propose an intrinsically sparse rewiring approach to train neural networks with state-of-the-art robust learning objectives under high sparsity. Importantly, in contrast to previously proposed pruning techniques, our approach satisfies global connectivity constraints throughout robust optimization, i.e., it does not require dense pre-training followed by pruning. Based on a Bayesian posterior sampling principle, a network rewiring process simultaneously learns the sparse connectivity structure and the robustness-accuracy trade-off based on the adversarial learning objective. 
Although our networks are sparsely connected throughout the whole training process, our experimental benchmark evaluations show that their performance is superior to recently proposed robustness-aware network pruning methods which start from densely connected networks.", "bibtex": "@InProceedings{pmlr-v139-ozdenizci21a,\n title = \t {Training Adversarially Robust Sparse Networks via Bayesian Connectivity Sampling},\n author = {{\\\"O}zdenizci, Ozan and Legenstein, Robert},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8314--8324},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ozdenizci21a/ozdenizci21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ozdenizci21a.html},\n abstract = \t {Deep neural networks have been shown to be susceptible to adversarial attacks. This lack of adversarial robustness is even more pronounced when models are compressed in order to meet hardware limitations. Hence, if adversarial robustness is an issue, training of sparsely connected networks necessitates considering adversarially robust sparse learning. Motivated by the efficient and stable computational function of the brain in the presence of a highly dynamic synaptic connectivity structure, we propose an intrinsically sparse rewiring approach to train neural networks with state-of-the-art robust learning objectives under high sparsity. Importantly, in contrast to previously proposed pruning techniques, our approach satisfies global connectivity constraints throughout robust optimization, i.e., it does not require dense pre-training followed by pruning. Based on a Bayesian posterior sampling principle, a network rewiring process simultaneously learns the sparse connectivity structure and the robustness-accuracy trade-off based on the adversarial learning objective. 
Although our networks are sparsely connected throughout the whole training process, our experimental benchmark evaluations show that their performance is superior to recently proposed robustness-aware network pruning methods which start from densely connected networks.}\n}", "pdf": "http://proceedings.mlr.press/v139/ozdenizci21a/ozdenizci21a.pdf", "supp": "", "pdf_size": 375702, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5087550303979200896&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Graz University of Technology, Institute of Theoretical Computer Science, Graz, Austria+Silicon Austria Labs, TU Graz - SAL Dependable Embedded Systems Lab, Graz, Austria; Graz University of Technology, Institute of Theoretical Computer Science, Graz, Austria", "aff_domain": "igi.tugraz.at; ", "email": "igi.tugraz.at; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/ozdenizci21a.html", "aff_unique_index": "0+1;0", "aff_unique_norm": "Graz University of Technology;Silicon Austria Labs", "aff_unique_dep": "Institute of Theoretical Computer Science;SAL Dependable Embedded Systems Lab", "aff_unique_url": "https://www.tugraz.at;", "aff_unique_abbr": "TUGraz;", "aff_campus_unique_index": "0+0;0", "aff_campus_unique": "Graz", "aff_country_unique_index": "0+0;0", "aff_country_unique": "Austria" }, { "title": "Training Data Subset Selection for Regression with Controlled Generalization Error", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8757", "id": "8757", "proceeding": "http://proceedings.mlr.press/v139/s21a.html", "slides": "", "author_site": "Durga S, Rishabh Iyer, Ganesh Ramakrishnan, Abir De", "author": "Durga S; Rishabh Iyer; Ganesh Ramakrishnan; Abir De", "abstract": "Data subset selection from a large number of training instances has been a successful approach toward efficient and cost-effective machine learning. However, models trained on a smaller subset may show poor generalization ability. In this paper, our goal is to design an algorithm for selecting a subset of the training data, so that the model can be trained quickly, without significantly sacrificing on accuracy. More specifically, we focus on data subset selection for $L_2$ regularized regression problems and provide a novel problem formulation which seeks to minimize the training loss with respect to both the trainable parameters and the subset of training data, subject to error bounds on the validation set. We tackle this problem using several technical innovations. First, we represent this problem with simplified constraints using the dual of the original training problem and show that the objective of this new representation is a monotone and $\\alpha$-submodular function, for a wide variety of modeling choices. Such properties lead us to develop SELCON, an efficient majorization-minimization algorithm for data subset selection, that admits an approximation guarantee even when the training provides an imperfect estimate of the trained model. 
Finally, our experiments on several datasets show that SELCON trades off accuracy and efficiency more effectively than the current state-of-the-art.", "bibtex": "@InProceedings{pmlr-v139-s21a,\n title = \t {Training Data Subset Selection for Regression with Controlled Generalization Error},\n author = {S, Durga and Iyer, Rishabh and Ramakrishnan, Ganesh and De, Abir},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9202--9212},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/s21a/s21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/s21a.html},\n abstract = \t {Data subset selection from a large number of training instances has been a successful approach toward efficient and cost-effective machine learning. However, models trained on a smaller subset may show poor generalization ability. In this paper, our goal is to design an algorithm for selecting a subset of the training data, so that the model can be trained quickly, without significantly sacrificing on accuracy. More specifically, we focus on data subset selection for $L_2$ regularized regression problems and provide a novel problem formulation which seeks to minimize the training loss with respect to both the trainable parameters and the subset of training data, subject to error bounds on the validation set. We tackle this problem using several technical innovations. First, we represent this problem with simplified constraints using the dual of the original training problem and show that the objective of this new representation is a monotone and $\\alpha$-submodular function, for a wide variety of modeling choices. Such properties lead us to develop SELCON, an efficient majorization-minimization algorithm for data subset selection, that admits an approximation guarantee even when the training provides an imperfect estimate of the trained model. 
Finally, our experiments on several datasets show that SELCON trades off accuracy and efficiency more effectively than the current state-of-the-art.}\n}", "pdf": "http://proceedings.mlr.press/v139/s21a/s21a.pdf", "supp": "", "pdf_size": 630313, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8877772987506172355&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 2, "aff": "CSE Department, Indian Institute of Technology, Bombay; CS Department, University of Texas at Dallas; CSE Department, Indian Institute of Technology, Bombay; CSE Department, Indian Institute of Technology, Bombay", "aff_domain": "cse.iitb.ac.in; ; ; ", "email": "cse.iitb.ac.in; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/s21a.html", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "Indian Institute of Technology Bombay;University of Texas at Dallas", "aff_unique_dep": "CSE Department;Computer Science Department", "aff_unique_url": "https://www.iitb.ac.in;https://www.utdallas.edu", "aff_unique_abbr": "IIT Bombay;UT Dallas", "aff_campus_unique_index": "0;1;0;0", "aff_campus_unique": "Bombay;Dallas", "aff_country_unique_index": "0;1;0;0", "aff_country_unique": "India;United States" }, { "title": "Training Graph Neural Networks with 1000 Layers", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10455", "id": "10455", "proceeding": "http://proceedings.mlr.press/v139/li21o.html", "slides": "/media/icml-2021/Slides/10455.pdf", "author_site": "Guohao Li, Matthias M\u00fcller, Bernard Ghanem, Vladlen Koltun", "author": "Guohao Li; Matthias M\u00fcller; Bernard Ghanem; Vladlen Koltun", "abstract": "Deep graph neural networks (GNNs) have achieved excellent results on various tasks on increasingly large graph datasets with millions of nodes and edges. However, memory complexity has become a major obstacle when training deep GNNs for practical applications due to the immense number of nodes, edges, and intermediate activations. To improve the scalability of GNNs, prior works propose smart graph sampling or partitioning strategies to train GNNs with a smaller set of nodes or sub-graphs. In this work, we study reversible connections, group convolutions, weight tying, and equilibrium models to advance the memory and parameter efficiency of GNNs. We find that reversible connections in combination with deep network architectures enable the training of overparameterized GNNs that significantly outperform existing methods on multiple datasets. Our models RevGNN-Deep (1001 layers with 80 channels each) and RevGNN-Wide (448 layers with 224 channels each) were both trained on a single commodity GPU and achieve an ROC-AUC of 87.74 $\\pm$ 0.13 and 88.14 $\\pm$ 0.15 on the ogbn-proteins dataset. 
To the best of our knowledge, RevGNN-Deep is the deepest GNN in the literature by one order of magnitude.", "bibtex": "@InProceedings{pmlr-v139-li21o,\n title = \t {Training Graph Neural Networks with 1000 Layers},\n author = {Li, Guohao and M{\\\"u}ller, Matthias and Ghanem, Bernard and Koltun, Vladlen},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6437--6449},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21o/li21o.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21o.html},\n abstract = \t {Deep graph neural networks (GNNs) have achieved excellent results on various tasks on increasingly large graph datasets with millions of nodes and edges. However, memory complexity has become a major obstacle when training deep GNNs for practical applications due to the immense number of nodes, edges, and intermediate activations. To improve the scalability of GNNs, prior works propose smart graph sampling or partitioning strategies to train GNNs with a smaller set of nodes or sub-graphs. In this work, we study reversible connections, group convolutions, weight tying, and equilibrium models to advance the memory and parameter efficiency of GNNs. We find that reversible connections in combination with deep network architectures enable the training of overparameterized GNNs that significantly outperform existing methods on multiple datasets. Our models RevGNN-Deep (1001 layers with 80 channels each) and RevGNN-Wide (448 layers with 224 channels each) were both trained on a single commodity GPU and achieve an ROC-AUC of 87.74 $\pm$ 0.13 and 88.14 $\pm$ 0.15 on the ogbn-proteins dataset. To the best of our knowledge, RevGNN-Deep is the deepest GNN in the literature by one order of magnitude.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21o/li21o.pdf", "supp": "", "pdf_size": 4263477, "gs_citation": 308, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15935176559620317056&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Intel Labs; Intel Labs; King Abdullah University of Science and Technology; Intel Labs", "aff_domain": "kaust.edu.sa; ; ; ", "email": "kaust.edu.sa; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/li21o.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "Intel;King Abdullah University of Science and Technology", "aff_unique_dep": "Intel Labs;", "aff_unique_url": "https://www.intel.com;https://www.kaust.edu.sa", "aff_unique_abbr": "Intel;KAUST", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;1;0", "aff_country_unique": "United States;Saudi Arabia" }, { "title": "Training Quantized Neural Networks to Global Optimality via Semidefinite Programming", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9259", "id": "9259", "proceeding": "http://proceedings.mlr.press/v139/bartan21a.html", "slides": "/media/icml-2021/Slides/9259.pdf", "author_site": "Burak Bartan, Mert Pilanci", "author": "Burak Bartan; Mert Pilanci", "abstract": "Neural networks (NNs) have been extremely successful across many tasks in machine learning. Quantization of NN weights has become an important topic due to its impact on their energy efficiency, inference time and deployment on hardware. 
Although post-training quantization is well-studied, training optimal quantized NNs involves combinatorial non-convex optimization problems which appear intractable. In this work, we introduce a convex optimization strategy to train quantized NNs with polynomial activations. Our method leverages hidden convexity in two-layer neural networks from the recent literature, semidefinite lifting, and Grothendieck\u2019s identity. Surprisingly, we show that certain quantized NN problems can be solved to global optimality provably in polynomial time in all relevant parameters via tight semidefinite relaxations. We present numerical examples to illustrate the effectiveness of our method.", "bibtex": "@InProceedings{pmlr-v139-bartan21a,\n title = \t {Training Quantized Neural Networks to Global Optimality via Semidefinite Programming},\n author = {Bartan, Burak and Pilanci, Mert},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {694--704},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bartan21a/bartan21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bartan21a.html},\n abstract = \t {Neural networks (NNs) have been extremely successful across many tasks in machine learning. Quantization of NN weights has become an important topic due to its impact on their energy efficiency, inference time and deployment on hardware. Although post-training quantization is well-studied, training optimal quantized NNs involves combinatorial non-convex optimization problems which appear intractable. In this work, we introduce a convex optimization strategy to train quantized NNs with polynomial activations. Our method leverages hidden convexity in two-layer neural networks from the recent literature, semidefinite lifting, and Grothendieck\u2019s identity. Surprisingly, we show that certain quantized NN problems can be solved to global optimality provably in polynomial time in all relevant parameters via tight semidefinite relaxations. 
We present numerical examples to illustrate the effectiveness of our method.}\n}", "pdf": "http://proceedings.mlr.press/v139/bartan21a/bartan21a.pdf", "supp": "", "pdf_size": 905127, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=506538187514949008&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Electrical Engineering, Stanford University, CA, USA; Department of Electrical Engineering, Stanford University, CA, USA", "aff_domain": "stanford.edu;stanford.edu", "email": "stanford.edu;stanford.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/bartan21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Department of Electrical Engineering", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Training Recurrent Neural Networks via Forward Propagation Through Time", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10383", "id": "10383", "proceeding": "http://proceedings.mlr.press/v139/kag21a.html", "slides": "", "author_site": "Anil Kag, Venkatesh Saligrama", "author": "Anil Kag; Venkatesh Saligrama", "abstract": "Back-propagation through time (BPTT) has been widely used for training Recurrent Neural Networks (RNNs). BPTT updates RNN parameters on an instance by back-propagating the error in time over the entire sequence length, and as a result, leads to poor trainability due to the well-known gradient explosion/decay phenomena. While a number of prior works have proposed to mitigate vanishing/explosion effect through careful RNN architecture design, these RNN variants still train with BPTT. We propose a novel forward-propagation algorithm, FPTT, where at each time, for an instance, we update RNN parameters by optimizing an instantaneous risk function. Our proposed risk is a regularization penalty at time $t$ that evolves dynamically based on previously observed losses, and allows for RNN parameter updates to converge to a stationary solution of the empirical RNN objective. We consider both sequence-to-sequence as well as terminal loss problems. Empirically FPTT outperforms BPTT on a number of well-known benchmark tasks, thus enabling architectures like LSTMs to solve long range dependencies problems.", "bibtex": "@InProceedings{pmlr-v139-kag21a,\n title = \t {Training Recurrent Neural Networks via Forward Propagation Through Time},\n author = {Kag, Anil and Saligrama, Venkatesh},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5189--5200},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kag21a/kag21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kag21a.html},\n abstract = \t {Back-propagation through time (BPTT) has been widely used for training Recurrent Neural Networks (RNNs). BPTT updates RNN parameters on an instance by back-propagating the error in time over the entire sequence length, and as a result, leads to poor trainability due to the well-known gradient explosion/decay phenomena. 
While a number of prior works have proposed to mitigate vanishing/explosion effect through careful RNN architecture design, these RNN variants still train with BPTT. We propose a novel forward-propagation algorithm, FPTT, where at each time, for an instance, we update RNN parameters by optimizing an instantaneous risk function. Our proposed risk is a regularization penalty at time $t$ that evolves dynamically based on previously observed losses, and allows for RNN parameter updates to converge to a stationary solution of the empirical RNN objective. We consider both sequence-to-sequence as well as terminal loss problems. Empirically FPTT outperforms BPTT on a number of well-known benchmark tasks, thus enabling architectures like LSTMs to solve long range dependencies problems.}\n}", "pdf": "http://proceedings.mlr.press/v139/kag21a/kag21a.pdf", "supp": "", "pdf_size": 803490, "gs_citation": 62, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4781045223536191579&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "Department of Electrical and Computer Engineering, Boston University, USA; Department of Electrical and Computer Engineering, Boston University, USA", "aff_domain": "bu.edu; ", "email": "bu.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/kag21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Boston University", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.bu.edu", "aff_unique_abbr": "BU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Training data-efficient image transformers & distillation through attention", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8671", "id": "8671", "proceeding": "http://proceedings.mlr.press/v139/touvron21a.html", "slides": "", "author_site": "Hugo Touvron, Matthieu Cord, Douze Matthijs, Francisco Massa, Alexandre Sablayrolles, Herve Jegou", "author": "Hugo Touvron; Matthieu Cord; Matthijs Douze; Francisco Massa; Alexandre Sablayrolles; Herve Jegou", "abstract": "Recently, neural networks purely based on attention were shown to address image understanding tasks such as image classification. These high-performing vision transformers are pre-trained with hundreds of millions of images using a large infrastructure, thereby limiting their adoption. In this work, we produce competitive convolution-free transformers trained on ImageNet only using a single computer in less than 3 days. Our reference vision transformer (86M parameters) achieves top-1 accuracy of 83.1% (single-crop) on ImageNet with no external data. We also introduce a teacher-student strategy specific to transformers. It relies on a distillation token ensuring that the student learns from the teacher through attention, typically from a convnet teacher. The learned transformers are competitive (85.2% top-1 acc.) with the state of the art on ImageNet, and similarly when transferred to other tasks. 
We will share our code and models.", "bibtex": "@InProceedings{pmlr-v139-touvron21a,\n title = \t {Training data-efficient image transformers & distillation through attention},\n author = {Touvron, Hugo and Cord, Matthieu and Douze, Matthijs and Massa, Francisco and Sablayrolles, Alexandre and Jegou, Herve},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10347--10357},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/touvron21a/touvron21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/touvron21a.html},\n abstract = \t {Recently, neural networks purely based on attention were shown to address image understanding tasks such as image classification. These high-performing vision transformers are pre-trained with hundreds of millions of images using a large infrastructure, thereby limiting their adoption. In this work, we produce competitive convolution-free transformers trained on ImageNet only using a single computer in less than 3 days. Our reference vision transformer (86M parameters) achieves top-1 accuracy of 83.1% (single-crop) on ImageNet with no external data. We also introduce a teacher-student strategy specific to transformers. It relies on a distillation token ensuring that the student learns from the teacher through attention, typically from a convnet teacher. The learned transformers are competitive (85.2% top-1 acc.) with the state of the art on ImageNet, and similarly when transferred to other tasks. We will share our code and models.}\n}", "pdf": "http://proceedings.mlr.press/v139/touvron21a/touvron21a.pdf", "supp": "", "pdf_size": 354992, "gs_citation": 8716, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16235705232339507184&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Facebook AI + Sorbonne University; Facebook AI + Sorbonne University; Facebook AI; Facebook AI; Facebook AI; Facebook AI", "aff_domain": "fb.com; ; ; ; ; ", "email": "fb.com; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/touvron21a.html", "aff_unique_index": "0+1;0+1;0;0;0;0", "aff_unique_norm": "Meta;Sorbonne University", "aff_unique_dep": "Facebook AI;", "aff_unique_url": "https://www.facebook.com;https://www.sorbonne.universite.fr", "aff_unique_abbr": "Facebook AI;Sorbonne", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+1;0+1;0;0;0;0", "aff_country_unique": "United States;France" }, { "title": "Trajectory Diversity for Zero-Shot Coordination", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9433", "id": "9433", "proceeding": "http://proceedings.mlr.press/v139/lupu21a.html", "slides": "", "author_site": "Andrei Lupu, Brandon Cui, Hengyuan Hu, Jakob Foerster", "author": "Andrei Lupu; Brandon Cui; Hengyuan Hu; Jakob Foerster", "abstract": "We study the problem of zero-shot coordination (ZSC), where agents must independently produce strategies for a collaborative game that are compatible with novel partners not seen during training. Our first contribution is to consider the need for diversity in generating such agents. Because self-play (SP) agents control their own trajectory distribution during training, each policy typically only performs well on this exact distribution. 
As a result, they achieve low scores in ZSC, since playing with another agent is likely to put them in situations they have not encountered during training. To address this issue, we train a common best response (BR) to a population of agents, which we regulate to be diverse. To this end, we introduce \\textit{Trajectory Diversity} (TrajeDi) \u2013 a differentiable objective for generating diverse reinforcement learning policies. We derive TrajeDi as a generalization of the Jensen-Shannon divergence between policies and motivate it experimentally in two simple settings. We then focus on the collaborative card game Hanabi, demonstrating the scalability of our method and improving upon the cross-play scores of both independently trained SP agents and BRs to unregularized populations.", "bibtex": "@InProceedings{pmlr-v139-lupu21a,\n title = \t {Trajectory Diversity for Zero-Shot Coordination},\n author = {Lupu, Andrei and Cui, Brandon and Hu, Hengyuan and Foerster, Jakob},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7204--7213},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lupu21a/lupu21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lupu21a.html},\n abstract = \t {We study the problem of zero-shot coordination (ZSC), where agents must independently produce strategies for a collaborative game that are compatible with novel partners not seen during training. Our first contribution is to consider the need for diversity in generating such agents. Because self-play (SP) agents control their own trajectory distribution during training, each policy typically only performs well on this exact distribution. As a result, they achieve low scores in ZSC, since playing with another agent is likely to put them in situations they have not encountered during training. To address this issue, we train a common best response (BR) to a population of agents, which we regulate to be diverse. To this end, we introduce \\textit{Trajectory Diversity} (TrajeDi) \u2013 a differentiable objective for generating diverse reinforcement learning policies. We derive TrajeDi as a generalization of the Jensen-Shannon divergence between policies and motivate it experimentally in two simple settings. 
We then focus on the collaborative card game Hanabi, demonstrating the scalability of our method and improving upon the cross-play scores of both independently trained SP agents and BRs to unregularized populations.}\n}", "pdf": "http://proceedings.mlr.press/v139/lupu21a/lupu21a.pdf", "supp": "", "pdf_size": 2115984, "gs_citation": 131, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10295336118246504626&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Mila, McGill University (Work done while at Facebook AI Research); Facebook AI Research; Facebook AI Research; Facebook AI Research", "aff_domain": "mail.mcgill.ca; ; ; ", "email": "mail.mcgill.ca; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/lupu21a.html", "aff_unique_index": "0;1;1;1", "aff_unique_norm": "McGill University;Meta", "aff_unique_dep": "Mila;Facebook AI Research", "aff_unique_url": "https://www.mcgill.ca;https://research.facebook.com", "aff_unique_abbr": "McGill;FAIR", "aff_campus_unique_index": "0", "aff_campus_unique": "Montreal;", "aff_country_unique_index": "0;1;1;1", "aff_country_unique": "Canada;United States" }, { "title": "Transfer-Based Semantic Anomaly Detection", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9479", "id": "9479", "proceeding": "http://proceedings.mlr.press/v139/deecke21a.html", "slides": "", "author_site": "Lucas Deecke, Lukas Ruff, Robert Vandermeulen, Hakan Bilen", "author": "Lucas Deecke; Lukas Ruff; Robert A. Vandermeulen; Hakan Bilen", "abstract": "Detecting semantic anomalies is challenging due to the countless ways in which they may appear in real-world data. While enhancing the robustness of networks may be sufficient for modeling simplistic anomalies, there is no good known way of preparing models for all potential and unseen anomalies that can potentially occur, such as the appearance of new object classes. In this paper, we show that a previously overlooked strategy for anomaly detection (AD) is to introduce an explicit inductive bias toward representations transferred over from some large and varied semantic task. We rigorously verify our hypothesis in controlled trials that utilize intervention, and show that it gives rise to surprisingly effective auxiliary objectives that outperform previous AD paradigms.", "bibtex": "@InProceedings{pmlr-v139-deecke21a,\n title = \t {Transfer-Based Semantic Anomaly Detection},\n author = {Deecke, Lucas and Ruff, Lukas and Vandermeulen, Robert A. and Bilen, Hakan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2546--2558},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/deecke21a/deecke21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/deecke21a.html},\n abstract = \t {Detecting semantic anomalies is challenging due to the countless ways in which they may appear in real-world data. While enhancing the robustness of networks may be sufficient for modeling simplistic anomalies, there is no good known way of preparing models for all potential and unseen anomalies that can potentially occur, such as the appearance of new object classes. 
In this paper, we show that a previously overlooked strategy for anomaly detection (AD) is to introduce an explicit inductive bias toward representations transferred over from some large and varied semantic task. We rigorously verify our hypothesis in controlled trials that utilize intervention, and show that it gives rise to surprisingly effective auxiliary objectives that outperform previous AD paradigms.}\n}", "pdf": "http://proceedings.mlr.press/v139/deecke21a/deecke21a.pdf", "supp": "", "pdf_size": 644433, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11326401675293682123&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "University of Edinburgh; Aignostics (majority of work done while with TU Berlin); ML Group, TU Berlin; University of Edinburgh", "aff_domain": "ed.ac.uk; ; ; ", "email": "ed.ac.uk; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/deecke21a.html", "aff_unique_index": "0;1;1;0", "aff_unique_norm": "University of Edinburgh;Technical University of Berlin", "aff_unique_dep": ";", "aff_unique_url": "https://www.ed.ac.uk;https://www.tu-berlin.de", "aff_unique_abbr": "Edinburgh;TU Berlin", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Berlin", "aff_country_unique_index": "0;1;1;0", "aff_country_unique": "United Kingdom;Germany" }, { "title": "Trees with Attention for Set Prediction Tasks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10127", "id": "10127", "proceeding": "http://proceedings.mlr.press/v139/hirsch21a.html", "slides": "", "author_site": "Roy Hirsch, Ran Gilad-Bachrach", "author": "Roy Hirsch; Ran Gilad-Bachrach", "abstract": "In many machine learning applications, each record represents a set of items. For example, when making predictions from medical records, the medications prescribed to a patient are a set whose size is not fixed and whose order is arbitrary. However, most machine learning algorithms are not designed to handle set structures and are limited to processing records of fixed size. Set-Tree, presented in this work, extends the support for sets to tree-based models, such as Random-Forest and Gradient-Boosting, by introducing an attention mechanism and set-compatible split criteria. We evaluate the new method empirically on a wide range of problems ranging from making predictions on sub-atomic particle jets to estimating the redshift of galaxies. The new method outperforms existing tree-based methods consistently and significantly. Moreover, it is competitive and often outperforms Deep Learning. We also discuss the theoretical properties of Set-Trees and explain how they enable item-level explainability.", "bibtex": "@InProceedings{pmlr-v139-hirsch21a,\n title = \t {Trees with Attention for Set Prediction Tasks},\n author = {Hirsch, Roy and Gilad-Bachrach, Ran},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4250--4261},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hirsch21a/hirsch21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hirsch21a.html},\n abstract = \t {In many machine learning applications, each record represents a set of items. 
For example, when making predictions from medical records, the medications prescribed to a patient are a set whose size is not fixed and whose order is arbitrary. However, most machine learning algorithms are not designed to handle set structures and are limited to processing records of fixed size. Set-Tree, presented in this work, extends the support for sets to tree-based models, such as Random-Forest and Gradient-Boosting, by introducing an attention mechanism and set-compatible split criteria. We evaluate the new method empirically on a wide range of problems ranging from making predictions on sub-atomic particle jets to estimating the redshift of galaxies. The new method outperforms existing tree-based methods consistently and significantly. Moreover, it is competitive and often outperforms Deep Learning. We also discuss the theoretical properties of Set-Trees and explain how they enable item-level explainability.}\n}", "pdf": "http://proceedings.mlr.press/v139/hirsch21a/hirsch21a.pdf", "supp": "", "pdf_size": 1655718, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8916867411595092231&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "Department of EE, Tel-Aviv University, Israel; Department of Bio-Medical Engineering, Tel-Aviv University, Israel + the Edmond J. Safra Center for Bioinformatics", "aff_domain": "mail.tau.ac.il;tauex.tau.ac.il", "email": "mail.tau.ac.il;tauex.tau.ac.il", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/hirsch21a.html", "aff_unique_index": "0;0+1", "aff_unique_norm": "Tel-Aviv University;Edmond J. Safra Center for Bioinformatics", "aff_unique_dep": "Department of EE;Bioinformatics", "aff_unique_url": "https://www.tau.ac.il;https://bioinformatics.sciences.ias.edu", "aff_unique_abbr": "TAU;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0+0", "aff_country_unique": "Israel" }, { "title": "Two Heads are Better Than One: Hypergraph-Enhanced Graph Reasoning for Visual Event Ratiocination", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9031", "id": "9031", "proceeding": "http://proceedings.mlr.press/v139/zheng21b.html", "slides": "", "author_site": "Wenbo Zheng, Lan Yan, Chao Gou, Fei-Yue Wang", "author": "Wenbo Zheng; Lan Yan; Chao Gou; Fei-Yue Wang", "abstract": "Even with a still image, humans can ratiocinate various visual cause-and-effect descriptions before, at present, and after, as well as beyond the given image. However, it is challenging for models to achieve such task\u2013the visual event ratiocination, owing to the limitations of time and space. To this end, we propose a novel multi-modal model, Hypergraph-Enhanced Graph Reasoning. First it represents the contents from the same modality as a semantic graph and mines the intra-modality relationship, therefore breaking the limitations in the spatial domain. Then, we introduce the Graph Self-Attention Enhancement. On the one hand, this enables semantic graph representations from different modalities to enhance each other and captures the inter-modality relationship along the line. On the other hand, it utilizes our built multi-modal hypergraphs in different moments to boost individual semantic graph representations, and breaks the limitations in the temporal domain. 
Our method illustrates the case of \"two heads are better than one\" in the sense that semantic graph representations with the help of the proposed enhancement mechanism are more robust than those without. Finally, we re-project these representations and leverage their outcomes to generate textual cause-and-effect descriptions. Experimental results show that our model achieves significantly higher performance in comparison with other state-of-the-arts.", "bibtex": "@InProceedings{pmlr-v139-zheng21b,\n title = \t {Two Heads are Better Than One: Hypergraph-Enhanced Graph Reasoning for Visual Event Ratiocination},\n author = {Zheng, Wenbo and Yan, Lan and Gou, Chao and Wang, Fei-Yue},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12747--12760},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zheng21b/zheng21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/zheng21b.html},\n abstract = \t {Even with a still image, humans can ratiocinate various visual cause-and-effect descriptions before, at present, and after, as well as beyond the given image. However, it is challenging for models to achieve such task\u2013the visual event ratiocination, owing to the limitations of time and space. To this end, we propose a novel multi-modal model, Hypergraph-Enhanced Graph Reasoning. First it represents the contents from the same modality as a semantic graph and mines the intra-modality relationship, therefore breaking the limitations in the spatial domain. Then, we introduce the Graph Self-Attention Enhancement. On the one hand, this enables semantic graph representations from different modalities to enhance each other and captures the inter-modality relationship along the line. On the other hand, it utilizes our built multi-modal hypergraphs in different moments to boost individual semantic graph representations, and breaks the limitations in the temporal domain. Our method illustrates the case of \"two heads are better than one\" in the sense that semantic graph representations with the help of the proposed enhancement mechanism are more robust than those without. Finally, we re-project these representations and leverage their outcomes to generate textual cause-and-effect descriptions. 
Experimental results show that our model achieves significantly higher performance in comparison with other state-of-the-arts.}\n}", "pdf": "http://proceedings.mlr.press/v139/zheng21b/zheng21b.pdf", "supp": "", "pdf_size": 2925478, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10084374489064616244&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "School of Software Engineering, Xi\u2019an Jiaotong University, Xi\u2019an 710049, China+The State Key Laboratory for Management and Control of Complex Systems, Institute of Automation, Chinese Academy of Sciences, Beijing 100190, China; The State Key Laboratory for Management and Control of Complex Systems, Institute of Automation, Chinese Academy of Sciences, Beijing 100190, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing 100190, China; School of Intelligent Systems Engineering, Sun Yat-sen University, Guangzhou 510275, China; The State Key Laboratory for Management and Control of Complex Systems, Institute of Automation, Chinese Academy of Sciences, Beijing 100190, China", "aff_domain": "ia.ac.cn; ; ;ia.ac.cn", "email": "ia.ac.cn; ; ;ia.ac.cn", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/zheng21b.html", "aff_unique_index": "0+1;1+2;3;1", "aff_unique_norm": "Xi'an Jiao Tong University;Chinese Academy of Sciences;University of Chinese Academy of Sciences;Sun Yat-sen University", "aff_unique_dep": "School of Software Engineering;Institute of Automation;School of Artificial Intelligence;School of Intelligent Systems Engineering", "aff_unique_url": "http://www.xjtu.edu.cn;http://www.ia.cas.cn;http://www.ucas.ac.cn;http://www.sysu.edu.cn/", "aff_unique_abbr": "XJTU;CAS;UCAS;SYSU", "aff_campus_unique_index": "0+1;1+1;2;1", "aff_campus_unique": "Xi'an;Beijing;Guangzhou", "aff_country_unique_index": "0+0;0+0;0;0", "aff_country_unique": "China" }, { "title": "Two-way kernel matrix puncturing: towards resource-efficient PCA and spectral clustering", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10275", "id": "10275", "proceeding": "http://proceedings.mlr.press/v139/couillet21a.html", "slides": "/media/icml-2021/Slides/10275.pdf", "author_site": "Romain COUILLET, Florent Chatelain, Nicolas Le Bihan", "author": "Romain Couillet; Florent Chatelain; Nicolas Le Bihan", "abstract": "The article introduces an elementary cost and storage reduction method for spectral clustering and principal component analysis. The method consists in randomly \u201cpuncturing\u201d both the data matrix $X\in\mathbb{C}^{p\times n}$ (or $\mathbb{R}^{p\times n}$) and its corresponding kernel (Gram) matrix $K$ through Bernoulli masks: $S\in\{0,1\}^{p\times n}$ for $X$ and $B\in\{0,1\}^{n\times n}$ for $K$. The resulting \u201ctwo-way punctured\u201d kernel is thus given by $K=\frac1p[(X\odot S)^{\mathsf{H}} (X\odot S)]\odot B$. We demonstrate that, for $X$ composed of independent columns drawn from a Gaussian mixture model, as $n,p\to\infty$ with $p/n\to c_0\in(0,\infty)$, the spectral behavior of $K$ \u2013 its limiting eigenvalue distribution, as well as its isolated eigenvalues and eigenvectors \u2013 is fully tractable and exhibits a series of counter-intuitive phenomena. 
We notably prove, and empirically confirm on various image databases, that it is possible to drastically puncture the data, thereby providing possibly huge computational and storage gains, for a virtually constant (clustering or PCA) performance. This preliminary study opens as such the path towards rethinking, from a large dimensional standpoint, computational and storage costs in elementary machine learning models.", "bibtex": "@InProceedings{pmlr-v139-couillet21a,\n title = \t {Two-way kernel matrix puncturing: towards resource-efficient PCA and spectral clustering},\n author = {Couillet, Romain and Chatelain, Florent and Bihan, Nicolas Le},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2156--2165},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/couillet21a/couillet21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/couillet21a.html},\n abstract = \t {The article introduces an elementary cost and storage reduction method for spectral clustering and principal component analysis. The method consists in randomly \u201cpuncturing\u201d both the data matrix $X\in\mathbb{C}^{p\times n}$ (or $\mathbb{R}^{p\times n}$) and its corresponding kernel (Gram) matrix $K$ through Bernoulli masks: $S\in\{0,1\}^{p\times n}$ for $X$ and $B\in\{0,1\}^{n\times n}$ for $K$. The resulting \u201ctwo-way punctured\u201d kernel is thus given by $K=\frac1p[(X\odot S)^{\mathsf{H}} (X\odot S)]\odot B$. We demonstrate that, for $X$ composed of independent columns drawn from a Gaussian mixture model, as $n,p\to\infty$ with $p/n\to c_0\in(0,\infty)$, the spectral behavior of $K$ \u2013 its limiting eigenvalue distribution, as well as its isolated eigenvalues and eigenvectors \u2013 is fully tractable and exhibits a series of counter-intuitive phenomena. We notably prove, and empirically confirm on various image databases, that it is possible to drastically puncture the data, thereby providing possibly huge computational and storage gains, for a virtually constant (clustering or PCA) performance. 
This preliminary study opens as such the path towards rethinking, from a large dimensional standpoint, computational and storage costs in elementary machine learning models.}\n}", "pdf": "http://proceedings.mlr.press/v139/couillet21a/couillet21a.pdf", "supp": "", "pdf_size": 3219233, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12123365507923827000&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "GIPSA-lab, CNRS, Grenoble-INP, University Grenoble-Alps+CentraleSup\u00e9lec, University Paris Saclay; GIPSA-lab, CNRS, Grenoble-INP, University Grenoble-Alps; GIPSA-lab, CNRS, Grenoble-INP, University Grenoble-Alps", "aff_domain": "gipsa-lab.grenoble-inp.fr;gipsa-lab.grenoble-inp.fr;gipsa-lab.grenoble-inp.fr", "email": "gipsa-lab.grenoble-inp.fr;gipsa-lab.grenoble-inp.fr;gipsa-lab.grenoble-inp.fr", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/couillet21a.html", "aff_unique_index": "0+1;0;0", "aff_unique_norm": "University Grenoble-Alps;CentraleSup\u00e9lec", "aff_unique_dep": "GIPSA-lab;", "aff_unique_url": "https://www.univ-grenoble-alps.fr;https://www.centralesupelec.fr", "aff_unique_abbr": "UGA;CS", "aff_campus_unique_index": "0+1;0;0", "aff_campus_unique": "Grenoble;University Paris Saclay", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "France" }, { "title": "UCB Momentum Q-learning: Correcting the bias without forgetting", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8561", "id": "8561", "proceeding": "http://proceedings.mlr.press/v139/menard21b.html", "slides": "", "author_site": "Pierre Menard, Omar Darwiche Domingues, Xuedong Shang, Michal Valko", "author": "Pierre Menard; Omar Darwiche Domingues; Xuedong Shang; Michal Valko", "abstract": "We propose UCBMQ, Upper Confidence Bound Momentum Q-learning, a new algorithm for reinforcement learning in tabular and possibly stage-dependent, episodic Markov decision process. UCBMQ is based on Q-learning where we add a momentum term and rely on the principle of optimism in face of uncertainty to deal with exploration. Our new technical ingredient of UCBMQ is the use of momentum to correct the bias that Q-learning suffers while, \\emph{at the same time}, limiting the impact it has on the second-order term of the regret. For UCBMQ, we are able to guarantee a regret of at most $\\tilde{O}(\\sqrt{H^3SAT}+ H^4 S A)$ where $H$ is the length of an episode, $S$ the number of states, $A$ the number of actions, $T$ the number of episodes and ignoring terms in poly$\\log(SAHT)$. 
Notably, UCBMQ is the first algorithm that simultaneously matches the lower bound of $\\Omega(\\sqrt{H^3SAT})$ for large enough $T$ and has a second-order term (with respect to $T$) that scales \\emph{only linearly} with the number of states $S$.", "bibtex": "@InProceedings{pmlr-v139-menard21b,\n title = \t {UCB Momentum Q-learning: Correcting the bias without forgetting},\n author = {Menard, Pierre and Domingues, Omar Darwiche and Shang, Xuedong and Valko, Michal},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7609--7618},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/menard21b/menard21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/menard21b.html},\n abstract = \t {We propose UCBMQ, Upper Confidence Bound Momentum Q-learning, a new algorithm for reinforcement learning in tabular and possibly stage-dependent, episodic Markov decision process. UCBMQ is based on Q-learning where we add a momentum term and rely on the principle of optimism in face of uncertainty to deal with exploration. Our new technical ingredient of UCBMQ is the use of momentum to correct the bias that Q-learning suffers while, \\emph{at the same time}, limiting the impact it has on the second-order term of the regret. For UCBMQ, we are able to guarantee a regret of at most $\\tilde{O}(\\sqrt{H^3SAT}+ H^4 S A)$ where $H$ is the length of an episode, $S$ the number of states, $A$ the number of actions, $T$ the number of episodes and ignoring terms in poly$\\log(SAHT)$. Notably, UCBMQ is the first algorithm that simultaneously matches the lower bound of $\\Omega(\\sqrt{H^3SAT})$ for large enough $T$ and has a second-order term (with respect to $T$) that scales \\emph{only linearly} with the number of states $S$.}\n}", "pdf": "http://proceedings.mlr.press/v139/menard21b/menard21b.pdf", "supp": "", "pdf_size": 2686435, "gs_citation": 50, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13418224994694979040&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Otto von Guericke University; Inria; Universit\u00e9 de Lille; DeepMind Paris", "aff_domain": "ovgu.de;inria.fr; ; ", "email": "ovgu.de;inria.fr; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/menard21b.html", "aff_unique_index": "0;1;2;3", "aff_unique_norm": "Otto von Guericke University Magdeburg;INRIA;Universit\u00e9 de Lille;DeepMind", "aff_unique_dep": ";;;", "aff_unique_url": "https://www.ovgu.de;https://www.inria.fr;https://www.univ-lille.fr;https://deepmind.com", "aff_unique_abbr": "OVGU;Inria;UdeL;DeepMind", "aff_campus_unique_index": "1", "aff_campus_unique": ";Paris", "aff_country_unique_index": "0;1;1;1", "aff_country_unique": "Germany;France" }, { "title": "UnICORNN: A recurrent model for learning very long time dependencies", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10541", "id": "10541", "proceeding": "http://proceedings.mlr.press/v139/rusch21a.html", "slides": "/media/icml-2021/Slides/10541.pdf", "author_site": "T. Konstantin Rusch, Siddhartha Mishra", "author": "T. 
Konstantin Rusch; Siddhartha Mishra", "abstract": "The design of recurrent neural networks (RNNs) to accurately process sequential inputs with long-time dependencies is very challenging on account of the exploding and vanishing gradient problem. To overcome this, we propose a novel RNN architecture which is based on a structure preserving discretization of a Hamiltonian system of second-order ordinary differential equations that models networks of oscillators. The resulting RNN is fast, invertible (in time), memory efficient and we derive rigorous bounds on the hidden state gradients to prove the mitigation of the exploding and vanishing gradient problem. A suite of experiments are presented to demonstrate that the proposed RNN provides state of the art performance on a variety of learning tasks with (very) long-time dependencies.", "bibtex": "@InProceedings{pmlr-v139-rusch21a,\n title = \t {UnICORNN: A recurrent model for learning very long time dependencies},\n author = {Rusch, T. Konstantin and Mishra, Siddhartha},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9168--9178},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/rusch21a/rusch21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/rusch21a.html},\n abstract = \t {The design of recurrent neural networks (RNNs) to accurately process sequential inputs with long-time dependencies is very challenging on account of the exploding and vanishing gradient problem. To overcome this, we propose a novel RNN architecture which is based on a structure preserving discretization of a Hamiltonian system of second-order ordinary differential equations that models networks of oscillators. The resulting RNN is fast, invertible (in time), memory efficient and we derive rigorous bounds on the hidden state gradients to prove the mitigation of the exploding and vanishing gradient problem. 
A suite of experiments are presented to demonstrate that the proposed RNN provides state of the art performance on a variety of learning tasks with (very) long-time dependencies.}\n}", "pdf": "http://proceedings.mlr.press/v139/rusch21a/rusch21a.pdf", "supp": "", "pdf_size": 518571, "gs_citation": 87, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16728515819525304575&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Seminar for Applied Mathematics (SAM), D-MATH, ETH Z\u00fcrich, R\u00e4mistrasse 101, Z\u00fcrich-8092, Switzerland; Seminar for Applied Mathematics (SAM), D-MATH, ETH Z\u00fcrich, R\u00e4mistrasse 101, Z\u00fcrich-8092, Switzerland", "aff_domain": "sam.math.ethz.ch; ", "email": "sam.math.ethz.ch; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/rusch21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "Seminar for Applied Mathematics (SAM)", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETH", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Z\u00fcrich", "aff_country_unique_index": "0;0", "aff_country_unique": "Switzerland" }, { "title": "Unbalanced minibatch Optimal Transport; applications to Domain Adaptation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8555", "id": "8555", "proceeding": "http://proceedings.mlr.press/v139/fatras21a.html", "slides": "/media/icml-2021/Slides/8555.pdf", "author_site": "Kilian Fatras, Thibault S\u00e9journ\u00e9, R\u00e9mi Flamary, Nicolas Courty", "author": "Kilian Fatras; Thibault Sejourne; R\u00e9mi Flamary; Nicolas Courty", "abstract": "Optimal transport distances have found many applications in machine learning for their capacity to compare non-parametric probability distributions. Yet their algorithmic complexity generally prevents their direct use on large scale datasets. Among the possible strategies to alleviate this issue, practitioners can rely on computing estimates of these distances over subsets of data, i.e. minibatches. While computationally appealing, we highlight in this paper some limits of this strategy, arguing it can lead to undesirable smoothing effects. As an alternative, we suggest that the same minibatch strategy coupled with unbalanced optimal transport can yield more robust behaviors. We discuss the associated theoretical properties, such as unbiased estimators, existence of gradients and concentration bounds. 
Our experimental study shows that in challenging problems associated to domain adaptation, the use of unbalanced optimal transport leads to significantly better results, competing with or surpassing recent baselines.", "bibtex": "@InProceedings{pmlr-v139-fatras21a,\n title = \t {Unbalanced minibatch Optimal Transport; applications to Domain Adaptation},\n author = {Fatras, Kilian and Sejourne, Thibault and Flamary, R{\\'e}mi and Courty, Nicolas},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3186--3197},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fatras21a/fatras21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/fatras21a.html},\n abstract = \t {Optimal transport distances have found many applications in machine learning for their capacity to compare non-parametric probability distributions. Yet their algorithmic complexity generally prevents their direct use on large scale datasets. Among the possible strategies to alleviate this issue, practitioners can rely on computing estimates of these distances over subsets of data, i.e. minibatches. While computationally appealing, we highlight in this paper some limits of this strategy, arguing it can lead to undesirable smoothing effects. As an alternative, we suggest that the same minibatch strategy coupled with unbalanced optimal transport can yield more robust behaviors. We discuss the associated theoretical properties, such as unbiased estimators, existence of gradients and concentration bounds. Our experimental study shows that in challenging problems associated to domain adaptation, the use of unbalanced optimal transport leads to significantly better results, competing with or surpassing recent baselines.}\n}", "pdf": "http://proceedings.mlr.press/v139/fatras21a/fatras21a.pdf", "supp": "", "pdf_size": 3112725, "gs_citation": 182, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4565652805979542780&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 23, "aff": "Univ. Bretagne-Sud, CNRS, INRIA, IRISA, France; ENS, PSL University; Univ. 
Bretagne-Sud, CNRS, INRIA, IRISA, France; \u00c9cole Polytechnique, CMAP, France", "aff_domain": "irisa.fr; ; ;", "email": "irisa.fr; ; ;", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/fatras21a.html", "aff_unique_index": "0;1;0;2", "aff_unique_norm": "University of Bretagne-Sud;\u00c9cole Normale Sup\u00e9rieure;Ecole Polytechnique", "aff_unique_dep": ";;CMAP", "aff_unique_url": "https://www.univ-ubs.fr;https://www.ens.psl.eu;https://www.polytechnique.edu", "aff_unique_abbr": "UBS;ENS;Polytechnique", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "France" }, { "title": "Unbiased Gradient Estimation in Unrolled Computation Graphs with Persistent Evolution Strategies", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10175", "id": "10175", "proceeding": "http://proceedings.mlr.press/v139/vicol21a.html", "slides": "/media/icml-2021/Slides/10175.pdf", "author_site": "Paul Vicol, Luke Metz, Jascha Sohl-Dickstein", "author": "Paul Vicol; Luke Metz; Jascha Sohl-Dickstein", "abstract": "Unrolled computation graphs arise in many scenarios, including training RNNs, tuning hyperparameters through unrolled optimization, and training learned optimizers. Current approaches to optimizing parameters in such computation graphs suffer from high variance gradients, bias, slow updates, or large memory usage. We introduce a method called Persistent Evolution Strategies (PES), which divides the computation graph into a series of truncated unrolls, and performs an evolution strategies-based update step after each unroll. PES eliminates bias from these truncations by accumulating correction terms over the entire sequence of unrolls. PES allows for rapid parameter updates, has low memory usage, is unbiased, and has reasonable variance characteristics. We experimentally demonstrate the advantages of PES compared to several other methods for gradient estimation on synthetic tasks, and show its applicability to training learned optimizers and tuning hyperparameters.", "bibtex": "@InProceedings{pmlr-v139-vicol21a,\n title = \t {Unbiased Gradient Estimation in Unrolled Computation Graphs with Persistent Evolution Strategies},\n author = {Vicol, Paul and Metz, Luke and Sohl-Dickstein, Jascha},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10553--10563},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/vicol21a/vicol21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/vicol21a.html},\n abstract = \t {Unrolled computation graphs arise in many scenarios, including training RNNs, tuning hyperparameters through unrolled optimization, and training learned optimizers. Current approaches to optimizing parameters in such computation graphs suffer from high variance gradients, bias, slow updates, or large memory usage. We introduce a method called Persistent Evolution Strategies (PES), which divides the computation graph into a series of truncated unrolls, and performs an evolution strategies-based update step after each unroll. PES eliminates bias from these truncations by accumulating correction terms over the entire sequence of unrolls. 
PES allows for rapid parameter updates, has low memory usage, is unbiased, and has reasonable variance characteristics. We experimentally demonstrate the advantages of PES compared to several other methods for gradient estimation on synthetic tasks, and show its applicability to training learned optimizers and tuning hyperparameters.}\n}", "pdf": "http://proceedings.mlr.press/v139/vicol21a/vicol21a.pdf", "supp": "", "pdf_size": 1307645, "gs_citation": 77, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7115169472626776691&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "University of Toronto + Google Brain; Google Brain; Google Brain", "aff_domain": "cs.toronto.edu; ; ", "email": "cs.toronto.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/vicol21a.html", "aff_unique_index": "0+1;1;1", "aff_unique_norm": "University of Toronto;Google", "aff_unique_dep": ";Google Brain", "aff_unique_url": "https://www.utoronto.ca;https://brain.google.com", "aff_unique_abbr": "U of T;Google Brain", "aff_campus_unique_index": "1;1;1", "aff_campus_unique": ";Mountain View", "aff_country_unique_index": "0+1;1;1", "aff_country_unique": "Canada;United States" }, { "title": "Uncertainty Principles of Encoding GANs", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9053", "id": "9053", "proceeding": "http://proceedings.mlr.press/v139/feng21c.html", "slides": "", "author_site": "Ruili Feng, Zhouchen Lin, Jiapeng Zhu, Deli Zhao, Jingren Zhou, Zheng-Jun Zha", "author": "Ruili Feng; Zhouchen Lin; Jiapeng Zhu; Deli Zhao; Jingren Zhou; Zheng-Jun Zha", "abstract": "The compelling synthesis results of Generative Adversarial Networks (GANs) demonstrate rich semantic knowledge in their latent codes. To obtain this knowledge for downstream applications, encoding GANs has been proposed to learn encoders, such that real world data can be encoded to latent codes, which can be fed to generators to reconstruct those data. However, despite the theoretical guarantees of precise reconstruction in previous works, current algorithms generally reconstruct inputs with non-negligible deviations from inputs. In this paper we study this predicament of encoding GANs, which is indispensable research for the GAN community. We prove three uncertainty principles of encoding GANs in practice: a) the \u2018perfect\u2019 encoder and generator cannot be continuous at the same time, which implies that current framework of encoding GANs is ill-posed and needs rethinking; b) neural networks cannot approximate the underlying encoder and generator precisely at the same time, which explains why we cannot get \u2018perfect\u2019 encoders and generators as promised in previous theories; c) neural networks cannot be stable and accurate at the same time, which demonstrates the difficulty of training and trade-off between fidelity and disentanglement encountered in previous works. 
Our work may eliminate gaps between previous theories and empirical results, promote the understanding of GANs, and guide network designs for follow-up works.", "bibtex": "@InProceedings{pmlr-v139-feng21c,\n title = \t {Uncertainty Principles of Encoding GANs},\n author = {Feng, Ruili and Lin, Zhouchen and Zhu, Jiapeng and Zhao, Deli and Zhou, Jingren and Zha, Zheng-Jun},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3240--3251},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/feng21c/feng21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/feng21c.html},\n abstract = \t {The compelling synthesis results of Generative Adversarial Networks (GANs) demonstrate rich semantic knowledge in their latent codes. To obtain this knowledge for downstream applications, encoding GANs has been proposed to learn encoders, such that real world data can be encoded to latent codes, which can be fed to generators to reconstruct those data. However, despite the theoretical guarantees of precise reconstruction in previous works, current algorithms generally reconstruct inputs with non-negligible deviations from inputs. In this paper we study this predicament of encoding GANs, which is indispensable research for the GAN community. We prove three uncertainty principles of encoding GANs in practice: a) the \u2018perfect\u2019 encoder and generator cannot be continuous at the same time, which implies that current framework of encoding GANs is ill-posed and needs rethinking; b) neural networks cannot approximate the underlying encoder and generator precisely at the same time, which explains why we cannot get \u2018perfect\u2019 encoders and generators as promised in previous theories; c) neural networks cannot be stable and accurate at the same time, which demonstrates the difficulty of training and trade-off between fidelity and disentanglement encountered in previous works. Our work may eliminate gaps between previous theories and empirical results, promote the understanding of GANs, and guide network designs for follow-up works.}\n}", "pdf": "http://proceedings.mlr.press/v139/feng21c/feng21c.pdf", "supp": "", "pdf_size": 4625672, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9192811500408949378&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "University of Science and Technology of China, Hefei, China; Key Lab. 
of Machine Perception (MoE), School of EECS, Peking University, Beijing, China + Pazhou Lab, Guangzhou, China; Hong Kong University of Science and Technology, Hong Kong, China; Alibaba Group; Alibaba Group; University of Science and Technology of China, Hefei, China", "aff_domain": "mail.ustc.edu.cn;pku.edu.cn; ; ; ;ustc.edu.cn", "email": "mail.ustc.edu.cn;pku.edu.cn; ; ; ;ustc.edu.cn", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/feng21c.html", "aff_unique_index": "0;1+2;3;4;4;0", "aff_unique_norm": "University of Science and Technology of China;Peking University;Pazhou Lab;Hong Kong University of Science and Technology;Alibaba Group", "aff_unique_dep": ";School of EECS;;;", "aff_unique_url": "http://www.ustc.edu.cn;http://www.pku.edu.cn;;https://www.ust.hk;https://www.alibaba.com", "aff_unique_abbr": "USTC;PKU;;HKUST;Alibaba", "aff_campus_unique_index": "0;1+2;3;0", "aff_campus_unique": "Hefei;Beijing;Guangzhou;Hong Kong;", "aff_country_unique_index": "0;0+0;0;0;0;0", "aff_country_unique": "China" }, { "title": "Uncertainty Weighted Actor-Critic for Offline Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8869", "id": "8869", "proceeding": "http://proceedings.mlr.press/v139/wu21i.html", "slides": "", "author_site": "Yue Wu, Shuangfei Zhai, Nitish Srivastava, Joshua M Susskind, Jian Zhang, Ruslan Salakhutdinov, Hanlin Goh", "author": "Yue Wu; Shuangfei Zhai; Nitish Srivastava; Joshua M Susskind; Jian Zhang; Ruslan Salakhutdinov; Hanlin Goh", "abstract": "Offline Reinforcement Learning promises to learn effective policies from previously-collected, static datasets without the need for exploration. However, existing Q-learning and actor-critic based off-policy RL algorithms fail when bootstrapping from out-of-distribution (OOD) actions or states. We hypothesize that a key missing ingredient from the existing methods is a proper treatment of uncertainty in the offline setting. We propose Uncertainty Weighted Actor-Critic (UWAC), an algorithm that detects OOD state-action pairs and down-weights their contribution in the training objectives accordingly. Implementation-wise, we adopt a practical and effective dropout-based uncertainty estimation method that introduces very little overhead over existing RL algorithms. Empirically, we observe that UWAC substantially improves model stability during training. 
In addition, UWAC out-performs existing offline RL methods on a variety of competitive tasks, and achieves significant performance gains over the state-of-the-art baseline on datasets with sparse demonstrations collected from human experts.", "bibtex": "@InProceedings{pmlr-v139-wu21i,\n title = \t {Uncertainty Weighted Actor-Critic for Offline Reinforcement Learning},\n author = {Wu, Yue and Zhai, Shuangfei and Srivastava, Nitish and Susskind, Joshua M and Zhang, Jian and Salakhutdinov, Ruslan and Goh, Hanlin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11319--11328},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wu21i/wu21i.pdf},\n url = \t {https://proceedings.mlr.press/v139/wu21i.html},\n abstract = \t {Offline Reinforcement Learning promises to learn effective policies from previously-collected, static datasets without the need for exploration. However, existing Q-learning and actor-critic based off-policy RL algorithms fail when bootstrapping from out-of-distribution (OOD) actions or states. We hypothesize that a key missing ingredient from the existing methods is a proper treatment of uncertainty in the offline setting. We propose Uncertainty Weighted Actor-Critic (UWAC), an algorithm that detects OOD state-action pairs and down-weights their contribution in the training objectives accordingly. Implementation-wise, we adopt a practical and effective dropout-based uncertainty estimation method that introduces very little overhead over existing RL algorithms. Empirically, we observe that UWAC substantially improves model stability during training. 
In addition, UWAC out-performs existing offline RL methods on a variety of competitive tasks, and achieves significant performance gains over the state-of-the-art baseline on datasets with sparse demonstrations collected from human experts.}\n}", "pdf": "http://proceedings.mlr.press/v139/wu21i/wu21i.pdf", "supp": "", "pdf_size": 9043542, "gs_citation": 224, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4444134390271190858&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Apple Inc.+Carnegie Mellon University; Apple Inc.; Apple Inc.; Apple Inc.; Apple Inc.; Carnegie Mellon University; Apple Inc.", "aff_domain": "andrew.cmu.edu; ; ; ; ; ; ", "email": "andrew.cmu.edu; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/wu21i.html", "aff_unique_index": "0+1;0;0;0;0;1;0", "aff_unique_norm": "Apple;Carnegie Mellon University", "aff_unique_dep": "Apple Inc.;", "aff_unique_url": "https://www.apple.com;https://www.cmu.edu", "aff_unique_abbr": "Apple;CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Uncovering the Connections Between Adversarial Transferability and Knowledge Transferability", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8745", "id": "8745", "proceeding": "http://proceedings.mlr.press/v139/liang21b.html", "slides": "", "author_site": "Kaizhao Liang, Yibo Zhang, Boxin Wang, Zhuolin Yang, Sanmi Koyejo, Bo Li", "author": "Kaizhao Liang; Jacky Y Zhang; Boxin Wang; Zhuolin Yang; Sanmi Koyejo; Bo Li", "abstract": "Knowledge transferability, or transfer learning, has been widely adopted to allow a pre-trained model in the source domain to be effectively adapted to downstream tasks in the target domain. It is thus important to explore and understand the factors affecting knowledge transferability. In this paper, as the first work, we analyze and demonstrate the connections between knowledge transferability and another important phenomenon\u2013adversarial transferability, \\emph{i.e.}, adversarial examples generated against one model can be transferred to attack other models. Our theoretical studies show that adversarial transferability indicates knowledge transferability, and vice versa. Moreover, based on the theoretical insights, we propose two practical adversarial transferability metrics to characterize this process, serving as bidirectional indicators between adversarial and knowledge transferability. We conduct extensive experiments for different scenarios on diverse datasets, showing a positive correlation between adversarial transferability and knowledge transferability. 
Our findings will shed light on future research about effective knowledge transfer learning and adversarial transferability analyses.", "bibtex": "@InProceedings{pmlr-v139-liang21b,\n title = \t {Uncovering the Connections Between Adversarial Transferability and Knowledge Transferability},\n author = {Liang, Kaizhao and Zhang, Jacky Y and Wang, Boxin and Yang, Zhuolin and Koyejo, Sanmi and Li, Bo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6577--6587},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liang21b/liang21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/liang21b.html},\n abstract = \t {Knowledge transferability, or transfer learning, has been widely adopted to allow a pre-trained model in the source domain to be effectively adapted to downstream tasks in the target domain. It is thus important to explore and understand the factors affecting knowledge transferability. In this paper, as the first work, we analyze and demonstrate the connections between knowledge transferability and another important phenomenon\u2013adversarial transferability, \\emph{i.e.}, adversarial examples generated against one model can be transferred to attack other models. Our theoretical studies show that adversarial transferability indicates knowledge transferability, and vice versa. Moreover, based on the theoretical insights, we propose two practical adversarial transferability metrics to characterize this process, serving as bidirectional indicators between adversarial and knowledge transferability. We conduct extensive experiments for different scenarios on diverse datasets, showing a positive correlation between adversarial transferability and knowledge transferability. 
Our findings will shed light on future research about effective knowledge transfer learning and adversarial transferability analyses.}\n}", "pdf": "http://proceedings.mlr.press/v139/liang21b/liang21b.pdf", "supp": "", "pdf_size": 2043181, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4417489739103343011&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science, the University of Illinois at Urbana-Champaign; Department of Computer Science, the University of Illinois at Urbana-Champaign; Department of Computer Science, the University of Illinois at Urbana-Champaign; Department of Computer Science, the University of Illinois at Urbana-Champaign; Department of Computer Science, the University of Illinois at Urbana-Champaign; Department of Computer Science, the University of Illinois at Urbana-Champaign", "aff_domain": "illinois.edu;illinois.edu; ; ; ; ", "email": "illinois.edu;illinois.edu; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/liang21b.html", "aff_unique_index": "0;0;0;0;0;0", "aff_unique_norm": "University of Illinois Urbana-Champaign", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://illinois.edu", "aff_unique_abbr": "UIUC", "aff_campus_unique_index": "0;0;0;0;0;0", "aff_campus_unique": "Urbana-Champaign", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Understanding Failures in Out-of-Distribution Detection with Deep Generative Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9421", "id": "9421", "proceeding": "http://proceedings.mlr.press/v139/zhang21g.html", "slides": "", "author_site": "Lily Zhang, Mark Goldstein, Rajesh Ranganath", "author": "Lily Zhang; Mark Goldstein; Rajesh Ranganath", "abstract": "Deep generative models (DGMs) seem a natural fit for detecting out-of-distribution (OOD) inputs, but such models have been shown to assign higher probabilities or densities to OOD images than images from the training distribution. In this work, we explain why this behavior should be attributed to model misestimation. We first prove that no method can guarantee performance beyond random chance without assumptions on which out-distributions are relevant. We then interrogate the typical set hypothesis, the claim that relevant out-distributions can lie in high likelihood regions of the data distribution, and that OOD detection should be defined based on the data distribution\u2019s typical set. We highlight the consequences implied by assuming support overlap between in- and out-distributions, as well as the arbitrariness of the typical set for OOD detection. 
Our results suggest that estimation error is a more plausible explanation than the misalignment between likelihood-based OOD detection and out-distributions of interest, and we illustrate how even minimal estimation error can lead to OOD detection failures, yielding implications for future work in deep generative modeling and OOD detection.", "bibtex": "@InProceedings{pmlr-v139-zhang21g,\n title = \t {Understanding Failures in Out-of-Distribution Detection with Deep Generative Models},\n author = {Zhang, Lily and Goldstein, Mark and Ranganath, Rajesh},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12427--12436},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21g/zhang21g.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21g.html},\n abstract = \t {Deep generative models (DGMs) seem a natural fit for detecting out-of-distribution (OOD) inputs, but such models have been shown to assign higher probabilities or densities to OOD images than images from the training distribution. In this work, we explain why this behavior should be attributed to model misestimation. We first prove that no method can guarantee performance beyond random chance without assumptions on which out-distributions are relevant. We then interrogate the typical set hypothesis, the claim that relevant out-distributions can lie in high likelihood regions of the data distribution, and that OOD detection should be defined based on the data distribution\u2019s typical set. We highlight the consequences implied by assuming support overlap between in- and out-distributions, as well as the arbitrariness of the typical set for OOD detection. Our results suggest that estimation error is a more plausible explanation than the misalignment between likelihood-based OOD detection and out-distributions of interest, and we illustrate how even minimal estimation error can lead to OOD detection failures, yielding implications for future work in deep generative modeling and OOD detection.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21g/zhang21g.pdf", "supp": "", "pdf_size": 0, "gs_citation": 128, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11936149115961368288&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": ";;", "aff_domain": ";;", "email": ";;", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zhang21g.html" }, { "title": "Understanding Instance-Level Label Noise: Disparate Impacts and Treatments", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10229", "id": "10229", "proceeding": "http://proceedings.mlr.press/v139/liu21a.html", "slides": "", "author": "Yang Liu", "abstract": "This paper aims to provide understandings for the effect of an over-parameterized model, e.g. a deep neural network, memorizing instance-dependent noisy labels. We first quantify the harms caused by memorizing noisy instances, and show the disparate impacts of noisy labels for sample instances with different representation frequencies. We then analyze how several popular solutions for learning with noisy labels mitigate this harm at the instance level. Our analysis reveals that existing approaches lead to disparate treatments when handling noisy instances. 
While higher-frequency instances often enjoy a high probability of an improvement by applying these solutions, lower-frequency instances do not. Our analysis reveals new understandings for when these approaches work, and provides theoretical justifications for previously reported empirical observations. This observation requires us to rethink the distribution of label noise across instances and calls for different treatments for instances in different regimes.", "bibtex": "@InProceedings{pmlr-v139-liu21a,\n title = \t {Understanding Instance-Level Label Noise: Disparate Impacts and Treatments},\n author = {Liu, Yang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6725--6735},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21a/liu21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21a.html},\n abstract = \t {This paper aims to provide understandings for the effect of an over-parameterized model, e.g. a deep neural network, memorizing instance-dependent noisy labels. We first quantify the harms caused by memorizing noisy instances, and show the disparate impacts of noisy labels for sample instances with different representation frequencies. We then analyze how several popular solutions for learning with noisy labels mitigate this harm at the instance level. Our analysis reveals that existing approaches lead to disparate treatments when handling noisy instances. While higher-frequency instances often enjoy a high probability of an improvement by applying these solutions, lower-frequency instances do not. Our analysis reveals new understandings for when these approaches work, and provides theoretical justifications for previously reported empirical observations. 
This observation requires us to rethink the distribution of label noise across instances and calls for different treatments for instances in different regimes.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21a/liu21a.pdf", "supp": "", "pdf_size": 1476307, "gs_citation": 44, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6418187437164718013&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science and Engineering, University of California, Santa Cruz, CA, USA", "aff_domain": "ucsc.edu", "email": "ucsc.edu", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v139/liu21a.html", "aff_unique_index": "0", "aff_unique_norm": "University of California, Santa Cruz", "aff_unique_dep": "Department of Computer Science and Engineering", "aff_unique_url": "https://www.ucsc.edu", "aff_unique_abbr": "UCSC", "aff_campus_unique_index": "0", "aff_campus_unique": "Santa Cruz", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "title": "Understanding Invariance via Feedforward Inversion of Discriminatively Trained Classifiers", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10083", "id": "10083", "proceeding": "http://proceedings.mlr.press/v139/teterwak21a.html", "slides": "/media/icml-2021/Slides/10083.pdf", "author_site": "Piotr Teterwak, Chiyuan Zhang, Dilip Krishnan, Michael Mozer", "author": "Piotr Teterwak; Chiyuan Zhang; Dilip Krishnan; Michael C Mozer", "abstract": "A discriminatively trained neural net classifier can fit the training data perfectly if all information about its input other than class membership has been discarded prior to the output layer. Surprisingly, past research has discovered that some extraneous visual detail remains in the unnormalized logits. This finding is based on inversion techniques that map deep embeddings back to images. We explore this phenomenon further using a novel synthesis of methods, yielding a feedforward inversion model that produces remarkably high fidelity reconstructions, qualitatively superior to those of past efforts. When applied to an adversarially robust classifier model, the reconstructions contain sufficient local detail and global structure that they might be confused with the original image in a quick glance, and the object category can clearly be gleaned from the reconstruction. Our approach is based on BigGAN (Brock, 2019), with conditioning on logits instead of one-hot class labels. We use our reconstruction model as a tool for exploring the nature of representations, including: the influence of model architecture and training objectives (specifically robust losses), the forms of invariance that networks achieve, representational differences between correctly and incorrectly classified images, and the effects of manipulating logits and images. We believe that our method can inspire future investigations into the nature of information flow in a neural net and can provide diagnostics for improving discriminative models. 
We provide pre-trained models and visualizations at \\url{https://sites.google.com/view/understanding-invariance/home}.", "bibtex": "@InProceedings{pmlr-v139-teterwak21a,\n title = \t {Understanding Invariance via Feedforward Inversion of Discriminatively Trained Classifiers},\n author = {Teterwak, Piotr and Zhang, Chiyuan and Krishnan, Dilip and Mozer, Michael C},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10225--10235},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/teterwak21a/teterwak21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/teterwak21a.html},\n abstract = \t {A discriminatively trained neural net classifier can fit the training data perfectly if all information about its input other than class membership has been discarded prior to the output layer. Surprisingly, past research has discovered that some extraneous visual detail remains in the unnormalized logits. This finding is based on inversion techniques that map deep embeddings back to images. We explore this phenomenon further using a novel synthesis of methods, yielding a feedforward inversion model that produces remarkably high fidelity reconstructions, qualitatively superior to those of past efforts. When applied to an adversarially robust classifier model, the reconstructions contain sufficient local detail and global structure that they might be confused with the original image in a quick glance, and the object category can clearly be gleaned from the reconstruction. Our approach is based on BigGAN (Brock, 2019), with conditioning on logits instead of one-hot class labels. We use our reconstruction model as a tool for exploring the nature of representations, including: the influence of model architecture and training objectives (specifically robust losses), the forms of invariance that networks achieve, representational differences between correctly and incorrectly classified images, and the effects of manipulating logits and images. We believe that our method can inspire future investigations into the nature of information flow in a neural net and can provide diagnostics for improving discriminative models. 
We provide pre-trained models and visualizations at \\url{https://sites.google.com/view/understanding-invariance/home}.}\n}", "pdf": "http://proceedings.mlr.press/v139/teterwak21a/teterwak21a.pdf", "supp": "", "pdf_size": 2475139, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3106180593646811989&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Boston University; Google Research; Google Research; University of Colorado, Boulder", "aff_domain": "bu.edu; ; ; ", "email": "bu.edu; ; ; ", "github": "", "project": "https://sites.google.com/view/understanding-invariance/home", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/teterwak21a.html", "aff_unique_index": "0;1;1;2", "aff_unique_norm": "Boston University;Google;University of Colorado", "aff_unique_dep": ";Google Research;", "aff_unique_url": "https://www.bu.edu;https://research.google;https://www.colorado.edu", "aff_unique_abbr": "BU;Google Research;CU", "aff_campus_unique_index": "1;1;2", "aff_campus_unique": ";Mountain View;Boulder", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Understanding Noise Injection in GANs", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8863", "id": "8863", "proceeding": "http://proceedings.mlr.press/v139/feng21g.html", "slides": "", "author_site": "Ruili Feng, Deli Zhao, Zheng-Jun Zha", "author": "Ruili Feng; Deli Zhao; Zheng-Jun Zha", "abstract": "Noise injection is an effective way of circumventing overfitting and enhancing generalization in machine learning, the rationale of which has been validated in deep learning as well. Recently, noise injection exhibits surprising effectiveness when generating high-fidelity images in Generative Adversarial Networks (GANs) (e.g. StyleGAN). Despite its successful applications in GANs, the mechanism of its validity is still unclear. In this paper, we propose a geometric framework to theoretically analyze the role of noise injection in GANs. First, we point out the existence of the adversarial dimension trap inherent in GANs, which leads to the difficulty of learning a proper generator. Second, we successfully model the noise injection framework with exponential maps based on Riemannian geometry. Guided by our theories, we propose a general geometric realization for noise injection. Under our novel framework, the simple noise injection used in StyleGAN reduces to the Euclidean case. The goal of our work is to make theoretical steps towards understanding the underlying mechanism of state-of-the-art GAN algorithms. Experiments on image generation and GAN inversion validate our theory in practice.", "bibtex": "@InProceedings{pmlr-v139-feng21g,\n title = \t {Understanding Noise Injection in GANs},\n author = {Feng, Ruili and Zhao, Deli and Zha, Zheng-Jun},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3284--3293},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/feng21g/feng21g.pdf},\n url = \t {https://proceedings.mlr.press/v139/feng21g.html},\n abstract = \t {Noise injection is an effective way of circumventing overfitting and enhancing generalization in machine learning, the rationale of which has been validated in deep learning as well. 
Recently, noise injection exhibits surprising effectiveness when generating high-fidelity images in Generative Adversarial Networks (GANs) (e.g. StyleGAN). Despite its successful applications in GANs, the mechanism of its validity is still unclear. In this paper, we propose a geometric framework to theoretically analyze the role of noise injection in GANs. First, we point out the existence of the adversarial dimension trap inherent in GANs, which leads to the difficulty of learning a proper generator. Second, we successfully model the noise injection framework with exponential maps based on Riemannian geometry. Guided by our theories, we propose a general geometric realization for noise injection. Under our novel framework, the simple noise injection used in StyleGAN reduces to the Euclidean case. The goal of our work is to make theoretical steps towards understanding the underlying mechanism of state-of-the-art GAN algorithms. Experiments on image generation and GAN inversion validate our theory in practice.}\n}", "pdf": "http://proceedings.mlr.press/v139/feng21g/feng21g.pdf", "supp": "", "pdf_size": 2637066, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3370906824435788962&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "University of Science and Technology of China, Hefei, China; Alibaba Group; University of Science and Technology of China, Hefei, China", "aff_domain": "gmail.com;gmail.com;ustc.edu.cn", "email": "gmail.com;gmail.com;ustc.edu.cn", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/feng21g.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Science and Technology of China;Alibaba Group", "aff_unique_dep": ";", "aff_unique_url": "http://www.ustc.edu.cn;https://www.alibaba.com", "aff_unique_abbr": "USTC;Alibaba", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Hefei;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "China" }, { "title": "Understanding and Mitigating Accuracy Disparity in Regression", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10023", "id": "10023", "proceeding": "http://proceedings.mlr.press/v139/chi21a.html", "slides": "/media/icml-2021/Slides/10023.pdf", "author_site": "Jianfeng Chi, Yuan Tian, Geoff Gordon, Han Zhao", "author": "Jianfeng Chi; Yuan Tian; Geoffrey J. Gordon; Han Zhao", "abstract": "With the widespread deployment of large-scale prediction systems in high-stakes domains, e.g., face recognition, criminal justice, etc., disparity on prediction accuracy between different demographic subgroups has called for fundamental understanding on the source of such disparity and algorithmic intervention to mitigate it. In this paper, we study the accuracy disparity problem in regression. To begin with, we first propose an error decomposition theorem, which decomposes the accuracy disparity into the distance between marginal label distributions and the distance between conditional representations, to help explain why such accuracy disparity appears in practice. Motivated by this error decomposition and the general idea of distribution alignment with statistical distances, we then propose an algorithm to reduce this disparity, and analyze its game-theoretic optima of the proposed objective functions. To corroborate our theoretical findings, we also conduct experiments on five benchmark datasets. 
The experimental results suggest that our proposed algorithms can effectively mitigate accuracy disparity while maintaining the predictive power of the regression models.", "bibtex": "@InProceedings{pmlr-v139-chi21a,\n title = \t {Understanding and Mitigating Accuracy Disparity in Regression},\n author = {Chi, Jianfeng and Tian, Yuan and Gordon, Geoffrey J. and Zhao, Han},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1866--1876},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chi21a/chi21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/chi21a.html},\n abstract = \t {With the widespread deployment of large-scale prediction systems in high-stakes domains, e.g., face recognition, criminal justice, etc., disparity on prediction accuracy between different demographic subgroups has called for fundamental understanding on the source of such disparity and algorithmic intervention to mitigate it. In this paper, we study the accuracy disparity problem in regression. To begin with, we first propose an error decomposition theorem, which decomposes the accuracy disparity into the distance between marginal label distributions and the distance between conditional representations, to help explain why such accuracy disparity appears in practice. Motivated by this error decomposition and the general idea of distribution alignment with statistical distances, we then propose an algorithm to reduce this disparity, and analyze its game-theoretic optima of the proposed objective functions. To corroborate our theoretical findings, we also conduct experiments on five benchmark datasets. 
The experimental results suggest that our proposed algorithms can effectively mitigate accuracy disparity while maintaining the predictive power of the regression models.}\n}", "pdf": "http://proceedings.mlr.press/v139/chi21a/chi21a.pdf", "supp": "", "pdf_size": 1328110, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9962646376890451048&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Department of Computer Science, University of Virginia; Department of Computer Science, University of Virginia; Machine Learning Department, Carnegie Mellon University; Department of Computer Science, University of Illinois at Urbana-Champaign", "aff_domain": "virginia.com; ; ;illinois.edu", "email": "virginia.com; ; ;illinois.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/chi21a.html", "aff_unique_index": "0;0;1;2", "aff_unique_norm": "University of Virginia;Carnegie Mellon University;University of Illinois Urbana-Champaign", "aff_unique_dep": "Department of Computer Science;Machine Learning Department;Department of Computer Science", "aff_unique_url": "https://www.virginia.edu;https://www.cmu.edu;https://illinois.edu", "aff_unique_abbr": "UVA;CMU;UIUC", "aff_campus_unique_index": "1", "aff_campus_unique": ";Urbana-Champaign", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Understanding self-supervised learning dynamics without contrastive pairs", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10403", "id": "10403", "proceeding": "http://proceedings.mlr.press/v139/tian21a.html", "slides": "/media/icml-2021/Slides/10403.pdf", "author_site": "Yuandong Tian, Xinlei Chen, Surya Ganguli", "author": "Yuandong Tian; Xinlei Chen; Surya Ganguli", "abstract": "While contrastive approaches of self-supervised learning (SSL) learn representations by minimizing the distance between two augmented views of the same data point (positive pairs) and maximizing views from different data points (negative pairs), recent \\emph{non-contrastive} SSL (e.g., BYOL and SimSiam) show remarkable performance {\\it without} negative pairs, with an extra learnable predictor and a stop-gradient operation. A fundamental question rises: why they do not collapse into trivial representation? In this paper, we answer this question via a simple theoretical study and propose a novel approach, \\ourmethod{}, that \\emph{directly} sets the linear predictor based on the statistics of its inputs, rather than trained with gradient update. On ImageNet, it performs comparably with more complex two-layer non-linear predictors that employ BatchNorm and outperforms linear predictor by $2.5%$ in 300-epoch training (and $5%$ in 60-epoch). \\ourmethod{} is motivated by our theoretical study of the nonlinear learning dynamics of non-contrastive SSL in simple linear networks. Our study yields conceptual insights into how non-contrastive SSL methods learn, how they avoid representational collapse, and how multiple factors, like predictor networks, stop-gradients, exponential moving averages, and weight decay all come into play. Our simple theory recapitulates the results of real-world ablation studies in both STL-10 and ImageNet. 
Code is released\\footnote{\\url{https://github.com/facebookresearch/luckmatters/tree/master/ssl}}.", "bibtex": "@InProceedings{pmlr-v139-tian21a,\n title = \t {Understanding self-supervised learning dynamics without contrastive pairs},\n author = {Tian, Yuandong and Chen, Xinlei and Ganguli, Surya},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10268--10278},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tian21a/tian21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/tian21a.html},\n abstract = \t {While contrastive approaches of self-supervised learning (SSL) learn representations by minimizing the distance between two augmented views of the same data point (positive pairs) and maximizing views from different data points (negative pairs), recent \\emph{non-contrastive} SSL (e.g., BYOL and SimSiam) show remarkable performance {\\it without} negative pairs, with an extra learnable predictor and a stop-gradient operation. A fundamental question rises: why they do not collapse into trivial representation? In this paper, we answer this question via a simple theoretical study and propose a novel approach, \\ourmethod{}, that \\emph{directly} sets the linear predictor based on the statistics of its inputs, rather than trained with gradient update. On ImageNet, it performs comparably with more complex two-layer non-linear predictors that employ BatchNorm and outperforms linear predictor by $2.5%$ in 300-epoch training (and $5%$ in 60-epoch). \\ourmethod{} is motivated by our theoretical study of the nonlinear learning dynamics of non-contrastive SSL in simple linear networks. Our study yields conceptual insights into how non-contrastive SSL methods learn, how they avoid representational collapse, and how multiple factors, like predictor networks, stop-gradients, exponential moving averages, and weight decay all come into play. Our simple theory recapitulates the results of real-world ablation studies in both STL-10 and ImageNet. 
Code is released\\footnote{\\url{https://github.com/facebookresearch/luckmatters/tree/master/ssl}}.}\n}", "pdf": "http://proceedings.mlr.press/v139/tian21a/tian21a.pdf", "supp": "", "pdf_size": 4358508, "gs_citation": 349, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12706315897595648306&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Facebook AI Research; Facebook AI Research; Stanford University", "aff_domain": "fb.com; ; ", "email": "fb.com; ; ", "github": "https://github.com/facebookresearch/luckmatters/tree/master/ssl", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/tian21a.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "Meta;Stanford University", "aff_unique_dep": "Facebook AI Research;", "aff_unique_url": "https://research.facebook.com;https://www.stanford.edu", "aff_unique_abbr": "FAIR;Stanford", "aff_campus_unique_index": "1", "aff_campus_unique": ";Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Understanding the Dynamics of Gradient Flow in Overparameterized Linear models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9573", "id": "9573", "proceeding": "http://proceedings.mlr.press/v139/tarmoun21a.html", "slides": "", "author_site": "Salma Tarmoun, Guilherme Franca, Benjamin Haeffele, Rene Vidal", "author": "Salma Tarmoun; Guilherme Franca; Benjamin D Haeffele; Rene Vidal", "abstract": "We provide a detailed analysis of the dynamics of the gradient flow in overparameterized two-layer linear models. A particularly interesting feature of this model is that its nonlinear dynamics can be exactly solved as a consequence of a large number of conservation laws that constrain the system to follow particular trajectories. More precisely, the gradient flow preserves the difference of the Gramian matrices of the input and output weights, and its convergence to equilibrium depends on both the magnitude of that difference (which is fixed at initialization) and the spectrum of the data. In addition, and generalizing prior work, we prove our results without assuming small, balanced or spectral initialization for the weights. Moreover, we establish interesting mathematical connections between matrix factorization problems and differential equations of the Riccati type.", "bibtex": "@InProceedings{pmlr-v139-tarmoun21a,\n title = \t {Understanding the Dynamics of Gradient Flow in Overparameterized Linear models},\n author = {Tarmoun, Salma and Franca, Guilherme and Haeffele, Benjamin D and Vidal, Rene},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10153--10161},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/tarmoun21a/tarmoun21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/tarmoun21a.html},\n abstract = \t {We provide a detailed analysis of the dynamics of the gradient flow in overparameterized two-layer linear models. A particularly interesting feature of this model is that its nonlinear dynamics can be exactly solved as a consequence of a large number of conservation laws that constrain the system to follow particular trajectories. 
More precisely, the gradient flow preserves the difference of the Gramian matrices of the input and output weights, and its convergence to equilibrium depends on both the magnitude of that difference (which is fixed at initialization) and the spectrum of the data. In addition, and generalizing prior work, we prove our results without assuming small, balanced or spectral initialization for the weights. Moreover, we establish interesting mathematical connections between matrix factorization problems and differential equations of the Riccati type.}\n}", "pdf": "http://proceedings.mlr.press/v139/tarmoun21a/tarmoun21a.pdf", "supp": "", "pdf_size": 558011, "gs_citation": 71, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16514669319671245015&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "Mathematical Institute for Data Science, Johns Hopkins University + Department of Applied Mathematics and Statistics, Johns Hopkins University; Mathematical Institute for Data Science, Johns Hopkins University + Computer Science Division, University of California, Berkeley; Mathematical Institute for Data Science, Johns Hopkins University + Department of Biomedical Engineering, Johns Hopkins University; Mathematical Institute for Data Science, Johns Hopkins University + Department of Biomedical Engineering, Johns Hopkins University", "aff_domain": "jhu.edu;gmail.com;jhu.edu;jhu.edu", "email": "jhu.edu;gmail.com;jhu.edu;jhu.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/tarmoun21a.html", "aff_unique_index": "0+0;0+1;0+0;0+0", "aff_unique_norm": "Johns Hopkins University;University of California, Berkeley", "aff_unique_dep": "Mathematical Institute for Data Science;Computer Science Division", "aff_unique_url": "https://www.jhu.edu;https://www.berkeley.edu", "aff_unique_abbr": "JHU;UC Berkeley", "aff_campus_unique_index": ";1;;", "aff_campus_unique": ";Berkeley", "aff_country_unique_index": "0+0;0+0;0+0;0+0", "aff_country_unique": "United States" }, { "title": "UneVEn: Universal Value Exploration for Multi-Agent Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10297", "id": "10297", "proceeding": "http://proceedings.mlr.press/v139/gupta21a.html", "slides": "", "author_site": "Tarun Gupta, Anuj Mahajan, Bei Peng, Wendelin Boehmer, Shimon Whiteson", "author": "Tarun Gupta; Anuj Mahajan; Bei Peng; Wendelin Boehmer; Shimon Whiteson", "abstract": "VDN and QMIX are two popular value-based algorithms for cooperative MARL that learn a centralized action value function as a monotonic mixing of per-agent utilities. While this enables easy decentralization of the learned policy, the restricted joint action value function can prevent them from solving tasks that require significant coordination between agents at a given timestep. We show that this problem can be overcome by improving the joint exploration of all agents during training. Specifically, we propose a novel MARL approach called Universal Value Exploration (UneVEn) that learns a set of related tasks simultaneously with a linear decomposition of universal successor features. With the policies of already solved related tasks, the joint exploration process of all agents can be improved to help them achieve better coordination. 
Empirical results on a set of exploration games, challenging cooperative predator-prey tasks requiring significant coordination among agents, and StarCraft II micromanagement benchmarks show that UneVEn can solve tasks where other state-of-the-art MARL methods fail.", "bibtex": "@InProceedings{pmlr-v139-gupta21a,\n title = \t {UneVEn: Universal Value Exploration for Multi-Agent Reinforcement Learning},\n author = {Gupta, Tarun and Mahajan, Anuj and Peng, Bei and Boehmer, Wendelin and Whiteson, Shimon},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3930--3941},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gupta21a/gupta21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/gupta21a.html},\n abstract = \t {VDN and QMIX are two popular value-based algorithms for cooperative MARL that learn a centralized action value function as a monotonic mixing of per-agent utilities. While this enables easy decentralization of the learned policy, the restricted joint action value function can prevent them from solving tasks that require significant coordination between agents at a given timestep. We show that this problem can be overcome by improving the joint exploration of all agents during training. Specifically, we propose a novel MARL approach called Universal Value Exploration (UneVEn) that learns a set of related tasks simultaneously with a linear decomposition of universal successor features. With the policies of already solved related tasks, the joint exploration process of all agents can be improved to help them achieve better coordination. 
Empirical results on a set of exploration games, challenging cooperative predator-prey tasks requiring significant coordination among agents, and StarCraft II micromanagement benchmarks show that UneVEn can solve tasks where other state-of-the-art MARL methods fail.}\n}", "pdf": "http://proceedings.mlr.press/v139/gupta21a/gupta21a.pdf", "supp": "", "pdf_size": 2974550, "gs_citation": 59, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5502927213602777519&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Department of Computer Science, University of Oxford, Oxford, United Kingdom; Department of Computer Science, University of Oxford, Oxford, United Kingdom; Department of Computer Science, University of Oxford, Oxford, United Kingdom; Department of Software Technology, Delft University of Technology, Delft, Netherlands; Department of Computer Science, University of Oxford, Oxford, United Kingdom", "aff_domain": "cs.ox.ac.uk; ; ; ; ", "email": "cs.ox.ac.uk; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/gupta21a.html", "aff_unique_index": "0;0;0;1;0", "aff_unique_norm": "University of Oxford;Delft University of Technology", "aff_unique_dep": "Department of Computer Science;Department of Software Technology", "aff_unique_url": "https://www.ox.ac.uk;https://www.tudelft.nl", "aff_unique_abbr": "Oxford;TUDelft", "aff_campus_unique_index": "0;0;0;1;0", "aff_campus_unique": "Oxford;Delft", "aff_country_unique_index": "0;0;0;1;0", "aff_country_unique": "United Kingdom;Netherlands" }, { "title": "UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10507", "id": "10507", "proceeding": "http://proceedings.mlr.press/v139/wang21y.html", "slides": "", "author_site": "Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang", "author": "Chengyi Wang; Yu Wu; Yao Qian; Kenichi Kumatani; Shujie Liu; Furu Wei; Michael Zeng; Xuedong Huang", "abstract": "In this paper, we propose a unified pre-training approach called UniSpeech to learn speech representations with both labeled and unlabeled data, in which supervised phonetic CTC learning and phonetically-aware contrastive self-supervised learning are conducted in a multi-task learning manner. The resultant representations can capture information more correlated with phonetic structures and improve the generalization across languages and domains. We evaluate the effectiveness of UniSpeech for cross-lingual representation learning on public CommonVoice corpus. The results show that UniSpeech outperforms self-supervised pretraining and supervised transfer learning for speech recognition by a maximum of 13.4% and 26.9% relative phone error rate reductions respectively (averaged over all testing languages). 
The transferability of UniSpeech is also verified on a domain-shift speech recognition task, i.e., a relative word error rate reduction of 6% against the previous approach.", "bibtex": "@InProceedings{pmlr-v139-wang21y,\n title = \t {UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data},\n author = {Wang, Chengyi and Wu, Yu and Qian, Yao and Kumatani, Kenichi and Liu, Shujie and Wei, Furu and Zeng, Michael and Huang, Xuedong},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10937--10947},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21y/wang21y.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21y.html},\n abstract = \t {In this paper, we propose a unified pre-training approach called UniSpeech to learn speech representations with both labeled and unlabeled data, in which supervised phonetic CTC learning and phonetically-aware contrastive self-supervised learning are conducted in a multi-task learning manner. The resultant representations can capture information more correlated with phonetic structures and improve the generalization across languages and domains. We evaluate the effectiveness of UniSpeech for cross-lingual representation learning on public CommonVoice corpus. The results show that UniSpeech outperforms self-supervised pretraining and supervised transfer learning for speech recognition by a maximum of 13.4% and 26.9% relative phone error rate reductions respectively (averaged over all testing languages). The transferability of UniSpeech is also verified on a domain-shift speech recognition task, i.e., a relative word error rate reduction of 6% against the previous approach.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21y/wang21y.pdf", "supp": "", "pdf_size": 564533, "gs_citation": 144, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13435266557122878220&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Nankai University; Microsoft; Microsoft; Microsoft; Microsoft; Microsoft; Microsoft; Microsoft", "aff_domain": "mail.nankai.edu.cn;microsoft.com;microsoft.com; ; ; ; ; ", "email": "mail.nankai.edu.cn;microsoft.com;microsoft.com; ; ; ; ; ", "github": "https://github.com/cywang97/unispeech", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/wang21y.html", "aff_unique_index": "0;1;1;1;1;1;1;1", "aff_unique_norm": "Nankai University;Microsoft", "aff_unique_dep": ";Microsoft Corporation", "aff_unique_url": "http://www.nankai.edu.cn;https://www.microsoft.com", "aff_unique_abbr": "NKU;Microsoft", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;1;1;1;1;1;1", "aff_country_unique": "China;United States" }, { "title": "Unified Robust Semi-Supervised Variational Autoencoder", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9043", "id": "9043", "proceeding": "http://proceedings.mlr.press/v139/chen21a.html", "slides": "", "author": "Xu Chen", "abstract": "In this paper, we propose a novel noise-robust semi-supervised deep generative model by jointly tackling noisy labels and outliers simultaneously in a unified robust semi-supervised variational autoencoder (URSVAE). 
Typically, the uncertainty of of input data is characterized by placing uncertainty prior on the parameters of the probability density distributions in order to ensure the robustness of the variational encoder towards outliers. Subsequently, a noise transition model is integrated naturally into our model to alleviate the detrimental effects of noisy labels. Moreover, a robust divergence measure is employed to further enhance the robustness, where a novel variational lower bound is derived and optimized to infer the network parameters. By proving the influence function on the proposed evidence lower bound is bounded, the enormous potential of the proposed model in the classification in the presence of the compound noise is demonstrated. The experimental results highlight the superiority of the proposed framework by the evaluating on image classification tasks and comparing with the state-of-the-art approaches.", "bibtex": "@InProceedings{pmlr-v139-chen21a,\n title = \t {Unified Robust Semi-Supervised Variational Autoencoder},\n author = {Chen, Xu},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1529--1538},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21a/chen21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21a.html},\n abstract = \t {In this paper, we propose a novel noise-robust semi-supervised deep generative model by jointly tackling noisy labels and outliers simultaneously in a unified robust semi-supervised variational autoencoder (URSVAE). Typically, the uncertainty of of input data is characterized by placing uncertainty prior on the parameters of the probability density distributions in order to ensure the robustness of the variational encoder towards outliers. Subsequently, a noise transition model is integrated naturally into our model to alleviate the detrimental effects of noisy labels. Moreover, a robust divergence measure is employed to further enhance the robustness, where a novel variational lower bound is derived and optimized to infer the network parameters. By proving the influence function on the proposed evidence lower bound is bounded, the enormous potential of the proposed model in the classification in the presence of the compound noise is demonstrated. 
The experimental results highlight the superiority of the proposed framework by evaluating on image classification tasks and comparing with the state-of-the-art approaches.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21a/chen21a.pdf", "supp": "", "pdf_size": 1058425, "gs_citation": 0, "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:N1YVBO6pWjUJ:scholar.google.com/&scioq=Unified+Robust+Semi-Supervised+Variational+Autoencoder&hl=en&as_sdt=0,5", "gs_version_total": 3, "aff": "Cary, NC, USA", "aff_domain": "gmail.com", "email": "gmail.com", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v139/chen21a.html", "aff_unique_index": "0", "aff_unique_norm": "Cary", "aff_unique_dep": "", "aff_unique_url": "", "aff_unique_abbr": "", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "title": "Uniform Convergence, Adversarial Spheres and a Simple Remedy", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8795", "id": "8795", "proceeding": "http://proceedings.mlr.press/v139/bachmann21a.html", "slides": "", "author_site": "Gregor Bachmann, Seyed Moosavi, Thomas Hofmann", "author": "Gregor Bachmann; Seyed-Mohsen Moosavi-Dezfooli; Thomas Hofmann", "abstract": "Previous work has cast doubt on the general framework of uniform convergence and its ability to explain generalization in neural networks. By considering a specific dataset, it was observed that a neural network completely misclassifies a projection of the training data (adversarial set), rendering any existing generalization bound based on uniform convergence vacuous. We provide an extensive theoretical investigation of the previously studied data setting through the lens of infinitely-wide models. We prove that the Neural Tangent Kernel (NTK) also suffers from the same phenomenon and we uncover its origin. We highlight the important role of the output bias and show theoretically as well as empirically how a sensible choice completely mitigates the problem. We identify sharp phase transitions in the accuracy on the adversarial set and study its dependency on the training sample size. As a result, we are able to characterize critical sample sizes beyond which the effect disappears. Moreover, we study decompositions of a neural network into a clean and noisy part by considering its canonical decomposition into its different eigenfunctions and show empirically that for too small bias the adversarial phenomenon still persists.", "bibtex": "@InProceedings{pmlr-v139-bachmann21a,\n title = \t {Uniform Convergence, Adversarial Spheres and a Simple Remedy},\n author = {Bachmann, Gregor and Moosavi-Dezfooli, Seyed-Mohsen and Hofmann, Thomas},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {490--499},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bachmann21a/bachmann21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bachmann21a.html},\n abstract = \t {Previous work has cast doubt on the general framework of uniform convergence and its ability to explain generalization in neural networks. 
By considering a specific dataset, it was observed that a neural network completely misclassifies a projection of the training data (adversarial set), rendering any existing generalization bound based on uniform convergence vacuous. We provide an extensive theoretical investigation of the previously studied data setting through the lens of infinitely-wide models. We prove that the Neural Tangent Kernel (NTK) also suffers from the same phenomenon and we uncover its origin. We highlight the important role of the output bias and show theoretically as well as empirically how a sensible choice completely mitigates the problem. We identify sharp phase transitions in the accuracy on the adversarial set and study its dependency on the training sample size. As a result, we are able to characterize critical sample sizes beyond which the effect disappears. Moreover, we study decompositions of a neural network into a clean and noisy part by considering its canonical decomposition into its different eigenfunctions and show empirically that for too small bias the adversarial phenomenon still persists.}\n}", "pdf": "http://proceedings.mlr.press/v139/bachmann21a/bachmann21a.pdf", "supp": "", "pdf_size": 4538068, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14083880770674085314&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, ETH Z\u00fcrich; Department of Computer Science, ETH Z\u00fcrich; Department of Computer Science, ETH Z\u00fcrich", "aff_domain": "inf.ethz.ch; ; ", "email": "inf.ethz.ch; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/bachmann21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETHZ", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Switzerland" }, { "title": "Unifying Vision-and-Language Tasks via Text Generation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9961", "id": "9961", "proceeding": "http://proceedings.mlr.press/v139/cho21a.html", "slides": "/media/icml-2021/Slides/9961.pdf", "author_site": "Jaemin Cho, Jie Lei, Hao Tan, Mohit Bansal", "author": "Jaemin Cho; Jie Lei; Hao Tan; Mohit Bansal", "abstract": "Existing methods for vision-and-language learning typically require designing task-specific architectures and objectives for each task. For example, a multi-label answer classifier for visual question answering, a region scorer for referring expression comprehension, and a language decoder for image captioning, etc. To alleviate these hassles, in this work, we propose a unified framework that learns different tasks in a single architecture with the same language modeling objective, i.e., multimodal conditional text generation, where our models learn to generate labels in text based on the visual and textual inputs. On 7 popular vision-and-language benchmarks, including visual question answering, referring expression comprehension, visual commonsense reasoning, most of which have been previously modeled as discriminative tasks, our generative approach (with a single unified architecture) reaches comparable performance to recent task-specific state-of-the-art vision-and-language models. Moreover, our generative approach shows better generalization ability on questions that have rare answers. 
Also, we show that our framework allows multi-task learning in a single architecture with a single set of parameters, achieving similar performance to separately optimized single-task models. Our code is publicly available at: https://github.com/j-min/VL-T5", "bibtex": "@InProceedings{pmlr-v139-cho21a,\n title = \t {Unifying Vision-and-Language Tasks via Text Generation},\n author = {Cho, Jaemin and Lei, Jie and Tan, Hao and Bansal, Mohit},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1931--1942},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/cho21a/cho21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/cho21a.html},\n abstract = \t {Existing methods for vision-and-language learning typically require designing task-specific architectures and objectives for each task. For example, a multi-label answer classifier for visual question answering, a region scorer for referring expression comprehension, and a language decoder for image captioning, etc. To alleviate these hassles, in this work, we propose a unified framework that learns different tasks in a single architecture with the same language modeling objective, i.e., multimodal conditional text generation, where our models learn to generate labels in text based on the visual and textual inputs. On 7 popular vision-and-language benchmarks, including visual question answering, referring expression comprehension, visual commonsense reasoning, most of which have been previously modeled as discriminative tasks, our generative approach (with a single unified architecture) reaches comparable performance to recent task-specific state-of-the-art vision-and-language models. Moreover, our generative approach shows better generalization ability on questions that have rare answers. Also, we show that our framework allows multi-task learning in a single architecture with a single set of parameters, achieving similar performance to separately optimized single-task models. 
Our code is publicly available at: https://github.com/j-min/VL-T5}\n}", "pdf": "http://proceedings.mlr.press/v139/cho21a/cho21a.pdf", "supp": "", "pdf_size": 4294323, "gs_citation": 601, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17951690001214387773&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "UNC Chapel Hill; UNC Chapel Hill; UNC Chapel Hill; UNC Chapel Hill", "aff_domain": "cs.unc.edu;cs.unc.edu;cs.unc.edu;cs.unc.edu", "email": "cs.unc.edu;cs.unc.edu;cs.unc.edu;cs.unc.edu", "github": "https://github.com/j-min/VL-T5", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/cho21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of North Carolina at Chapel Hill", "aff_unique_dep": "", "aff_unique_url": "https://www.unc.edu", "aff_unique_abbr": "UNC", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Chapel Hill", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Unitary Branching Programs: Learnability and Lower Bounds", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10679", "id": "10679", "proceeding": "http://proceedings.mlr.press/v139/andino21a.html", "slides": "/media/icml-2021/Slides/10679_SDnBO4x.pdf", "author_site": "Fidel Ernesto Diaz Andino, Maria Kokkou, Mateus de Oliveira Oliveira, Farhad Vadiee", "author": "Fidel Ernesto Diaz Andino; Maria Kokkou; Mateus De Oliveira Oliveira; Farhad Vadiee", "abstract": "Bounded width branching programs are a formalism that can be used to capture the notion of non-uniform constant-space computation. In this work, we study a generalized version of bounded width branching programs where instructions are defined by unitary matrices of bounded dimension. We introduce a new learning framework for these branching programs that leverages on a combination of local search techniques with gradient descent over Riemannian manifolds. We also show that gapped, read-once branching programs of bounded dimension can be learned with a polynomial number of queries in the presence of a teacher. Finally, we provide explicit near-quadratic size lower-bounds for bounded-dimension unitary branching programs, and exponential size lower-bounds for bounded-dimension read-once gapped unitary branching programs. The first lower bound is proven using a combination of Neciporuk\u2019s lower bound technique with classic results from algebraic geometry. The second lower bound is proven within the framework of communication complexity theory.", "bibtex": "@InProceedings{pmlr-v139-andino21a,\n title = \t {Unitary Branching Programs: Learnability and Lower Bounds},\n author = {Andino, Fidel Ernesto Diaz and Kokkou, Maria and De Oliveira Oliveira, Mateus and Vadiee, Farhad},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {297--306},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/andino21a/andino21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/andino21a.html},\n abstract = \t {Bounded width branching programs are a formalism that can be used to capture the notion of non-uniform constant-space computation. In this work, we study a generalized version of bounded width branching programs where instructions are defined by unitary matrices of bounded dimension. 
We introduce a new learning framework for these branching programs that leverages on a combination of local search techniques with gradient descent over Riemannian manifolds. We also show that gapped, read-once branching programs of bounded dimension can be learned with a polynomial number of queries in the presence of a teacher. Finally, we provide explicit near-quadratic size lower-bounds for bounded-dimension unitary branching programs, and exponential size lower-bounds for bounded-dimension read-once gapped unitary branching programs. The first lower bound is proven using a combination of Neciporuk\u2019s lower bound technique with classic results from algebraic geometry. The second lower bound is proven within the framework of communication complexity theory.}\n}", "pdf": "http://proceedings.mlr.press/v139/andino21a/andino21a.pdf", "supp": "", "pdf_size": 402906, "gs_citation": 1, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14143012617393561431&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 6, "aff": "University of S\u00e3o Paulo, S\u00e3o Paulo, Brazil; Chalmers University of Technology, Gothenburg, Sweden; University of Bergen, Bergen, Norway; University of Bergen, Bergen, Norway", "aff_domain": "uib.no; ; ; ", "email": "uib.no; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/andino21a.html", "aff_unique_index": "0;1;2;2", "aff_unique_norm": "University of Sao Paulo;Chalmers University of Technology;University of Bergen", "aff_unique_dep": ";;", "aff_unique_url": "https://www.usp.br;https://www.chalmers.se;https://www.uib.no", "aff_unique_abbr": "USP;Chalmers;uib", "aff_campus_unique_index": "0;1;2;2", "aff_campus_unique": "Sao Paulo;Gothenburg;Bergen", "aff_country_unique_index": "0;1;2;2", "aff_country_unique": "Brazil;Sweden;Norway" }, { "title": "Unsupervised Co-part Segmentation through Assembly", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9615", "id": "9615", "proceeding": "http://proceedings.mlr.press/v139/gao21c.html", "slides": "", "author_site": "Qingzhe Gao, Bin Wang, Libin Liu, Baoquan Chen", "author": "Qingzhe Gao; Bin Wang; Libin Liu; Baoquan Chen", "abstract": "Co-part segmentation is an important problem in computer vision for its rich applications. We propose an unsupervised learning approach for co-part segmentation from images. For the training stage, we leverage motion information embedded in videos and explicitly extract latent representations to segment meaningful object parts. More importantly, we introduce a dual procedure of part-assembly to form a closed loop with part-segmentation, enabling an effective self-supervision. We demonstrate the effectiveness of our approach with a host of extensive experiments, ranging from human bodies, hands, quadruped, and robot arms. 
We show that our approach can achieve meaningful and compact part segmentation, outperforming state-of-the-art approaches on diverse benchmarks.", "bibtex": "@InProceedings{pmlr-v139-gao21c,\n title = \t {Unsupervised Co-part Segmentation through Assembly},\n author = {Gao, Qingzhe and Wang, Bin and Liu, Libin and Chen, Baoquan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3576--3586},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/gao21c/gao21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/gao21c.html},\n abstract = \t {Co-part segmentation is an important problem in computer vision for its rich applications. We propose an unsupervised learning approach for co-part segmentation from images. For the training stage, we leverage motion information embedded in videos and explicitly extract latent representations to segment meaningful object parts. More importantly, we introduce a dual procedure of part-assembly to form a closed loop with part-segmentation, enabling an effective self-supervision. We demonstrate the effectiveness of our approach with a host of extensive experiments, ranging from human bodies, hands, quadruped, and robot arms. We show that our approach can achieve meaningful and compact part segmentation, outperforming state-of-the-art approaches on diverse benchmarks.}\n}", "pdf": "http://proceedings.mlr.press/v139/gao21c/gao21c.pdf", "supp": "", "pdf_size": 3357359, "gs_citation": 19, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11164401170119653450&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/gao21c.html" }, { "title": "Unsupervised Embedding Adaptation via Early-Stage Feature Reconstruction for Few-Shot Classification", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9617", "id": "9617", "proceeding": "http://proceedings.mlr.press/v139/lee21d.html", "slides": "/media/icml-2021/Slides/9617.pdf", "author_site": "Dong Hoon Lee, Sae-Young Chung", "author": "Dong Hoon Lee; Sae-Young Chung", "abstract": "We propose unsupervised embedding adaptation for the downstream few-shot classification task. Based on findings that deep neural networks learn to generalize before memorizing, we develop Early-Stage Feature Reconstruction (ESFR) \u2014 a novel adaptation scheme with feature reconstruction and dimensionality-driven early stopping that finds generalizable features. Incorporating ESFR consistently improves the performance of baseline methods on all standard settings, including the recently proposed transductive method. 
ESFR used in conjunction with the transductive method further achieves state-of-the-art performance on mini-ImageNet, tiered-ImageNet, and CUB; especially with 1.2%\u20132.0% improvements in accuracy over the previous best performing method on the 1-shot setting.", "bibtex": "@InProceedings{pmlr-v139-lee21d,\n title = \t {Unsupervised Embedding Adaptation via Early-Stage Feature Reconstruction for Few-Shot Classification},\n author = {Lee, Dong Hoon and Chung, Sae-Young},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6098--6108},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lee21d/lee21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/lee21d.html},\n abstract = \t {We propose unsupervised embedding adaptation for the downstream few-shot classification task. Based on findings that deep neural networks learn to generalize before memorizing, we develop Early-Stage Feature Reconstruction (ESFR) \u2014 a novel adaptation scheme with feature reconstruction and dimensionality-driven early stopping that finds generalizable features. Incorporating ESFR consistently improves the performance of baseline methods on all standard settings, including the recently proposed transductive method. ESFR used in conjunction with the transductive method further achieves state-of-the-art performance on mini-ImageNet, tiered-ImageNet, and CUB; especially with 1.2%\u20132.0% improvements in accuracy over the previous best performing method on the 1-shot setting.}\n}", "pdf": "http://proceedings.mlr.press/v139/lee21d/lee21d.pdf", "supp": "", "pdf_size": 584883, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16796057083006115935&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "School of Electrical Engineering, Korea Advanced Institute of Science and Technology (KAIST); School of Electrical Engineering, Korea Advanced Institute of Science and Technology (KAIST)", "aff_domain": "kaist.ac.kr; ", "email": "kaist.ac.kr; ", "github": "https://github.com/movinghoon/ESFR", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/lee21d.html", "aff_unique_index": "0;0", "aff_unique_norm": "Korea Advanced Institute of Science and Technology", "aff_unique_dep": "School of Electrical Engineering", "aff_unique_url": "https://www.kaist.ac.kr", "aff_unique_abbr": "KAIST", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "South Korea" }, { "title": "Unsupervised Learning of Visual 3D Keypoints for Control", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9719", "id": "9719", "proceeding": "http://proceedings.mlr.press/v139/chen21b.html", "slides": "/media/icml-2021/Slides/9719_cylzX8a.pdf", "author_site": "Boyuan Chen, Pieter Abbeel, Deepak Pathak", "author": "Boyuan Chen; Pieter Abbeel; Deepak Pathak", "abstract": "Learning sensorimotor control policies from high-dimensional images crucially relies on the quality of the underlying visual representations. Prior works show that structured latent space such as visual keypoints often outperforms unstructured representations for robotic control. 
However, most of these representations, whether structured or unstructured, are learned in a 2D space even though the control tasks are usually performed in a 3D environment. In this work, we propose a framework to learn such a 3D geometric structure directly from images in an end-to-end unsupervised manner. The input images are embedded into latent 3D keypoints via a differentiable encoder which is trained to optimize both a multi-view consistency loss and downstream task objective. These discovered 3D keypoints tend to meaningfully capture robot joints as well as object movements in a consistent manner across both time and 3D space. The proposed approach outperforms prior state-of-the-art methods across a variety of reinforcement learning benchmarks. Code and videos at https://buoyancy99.github.io/unsup-3d-keypoints/.", "bibtex": "@InProceedings{pmlr-v139-chen21b,\n title = \t {Unsupervised Learning of Visual 3D Keypoints for Control},\n author = {Chen, Boyuan and Abbeel, Pieter and Pathak, Deepak},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1539--1549},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21b/chen21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21b.html},\n abstract = \t {Learning sensorimotor control policies from high-dimensional images crucially relies on the quality of the underlying visual representations. Prior works show that structured latent space such as visual keypoints often outperforms unstructured representations for robotic control. However, most of these representations, whether structured or unstructured, are learned in a 2D space even though the control tasks are usually performed in a 3D environment. In this work, we propose a framework to learn such a 3D geometric structure directly from images in an end-to-end unsupervised manner. The input images are embedded into latent 3D keypoints via a differentiable encoder which is trained to optimize both a multi-view consistency loss and downstream task objective. These discovered 3D keypoints tend to meaningfully capture robot joints as well as object movements in a consistent manner across both time and 3D space. The proposed approach outperforms prior state-of-the-art methods across a variety of reinforcement learning benchmarks. 
Code and videos at https://buoyancy99.github.io/unsup-3d-keypoints/.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21b/chen21b.pdf", "supp": "", "pdf_size": 3502351, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7013737531012764740&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "UC Berkeley; UC Berkeley; Carnegie Mellon University", "aff_domain": "berkeley.edu;berkeley.edu;cs.cmu.edu", "email": "berkeley.edu;berkeley.edu;cs.cmu.edu", "github": "https://github.com/buoyancy99/unsup-3d-keypoints", "project": "https://buoyancy99.github.io/unsup-3d-keypoints/", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/chen21b.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of California, Berkeley;Carnegie Mellon University", "aff_unique_dep": ";", "aff_unique_url": "https://www.berkeley.edu;https://www.cmu.edu", "aff_unique_abbr": "UC Berkeley;CMU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Unsupervised Part Representation by Flow Capsules", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10591", "id": "10591", "proceeding": "http://proceedings.mlr.press/v139/sabour21a.html", "slides": "", "author_site": "Sara Sabour Rouh Aghdam, Andrea Tagliasacchi, Soroosh Yazdani, Geoffrey Hinton, David Fleet", "author": "Sara Sabour; Andrea Tagliasacchi; Soroosh Yazdani; Geoffrey Hinton; David J Fleet", "abstract": "Capsule networks aim to parse images into a hierarchy of objects, parts and relations. While promising, they remain limited by an inability to learn effective low level part descriptions. To address this issue we propose a way to learn primary capsule encoders that detect atomic parts from a single image. During training we exploit motion as a powerful perceptual cue for part definition, with an expressive decoder for part generation within a layered image model with occlusion. Experiments demonstrate robust part discovery in the presence of multiple objects, cluttered backgrounds, and occlusion. The learned part decoder is shown to infer the underlying shape masks, effectively filling in occluded regions of the detected shapes. We evaluate FlowCapsules on unsupervised part segmentation and unsupervised image classification.", "bibtex": "@InProceedings{pmlr-v139-sabour21a,\n title = \t {Unsupervised Part Representation by Flow Capsules},\n author = {Sabour, Sara and Tagliasacchi, Andrea and Yazdani, Soroosh and Hinton, Geoffrey and Fleet, David J},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9213--9223},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sabour21a/sabour21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/sabour21a.html},\n abstract = \t {Capsule networks aim to parse images into a hierarchy of objects, parts and relations. While promising, they remain limited by an inability to learn effective low level part descriptions. To address this issue we propose a way to learn primary capsule encoders that detect atomic parts from a single image. 
During training we exploit motion as a powerful perceptual cue for part definition, with an expressive decoder for part generation within a layered image model with occlusion. Experiments demonstrate robust part discovery in the presence of multiple objects, cluttered backgrounds, and occlusion. The learned part decoder is shown to infer the underlying shape masks, effectively filling in occluded regions of the detected shapes. We evaluate FlowCapsules on unsupervised part segmentation and unsupervised image classification.}\n}", "pdf": "http://proceedings.mlr.press/v139/sabour21a/sabour21a.pdf", "supp": "", "pdf_size": 3843558, "gs_citation": 49, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14892811339910406288&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Google Research, Brain Team + Department of Computer Science, University of Toronto; Google Research, Brain Team + Department of Computer Science, University of Toronto; Google Research, Brain Team; Google Research, Brain Team + Department of Computer Science, University of Toronto; Google Research, Brain Team + Department of Computer Science, University of Toronto", "aff_domain": "google.com; ; ; ; ", "email": "google.com; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/sabour21a.html", "aff_unique_index": "0+1;0+1;0;0+1;0+1", "aff_unique_norm": "Google;University of Toronto", "aff_unique_dep": "Google Research;Department of Computer Science", "aff_unique_url": "https://research.google;https://www.utoronto.ca", "aff_unique_abbr": "Google;U of T", "aff_campus_unique_index": "0+1;0+1;0;0+1;0+1", "aff_campus_unique": "Mountain View;Toronto", "aff_country_unique_index": "0+1;0+1;0;0+1;0+1", "aff_country_unique": "United States;Canada" }, { "title": "Unsupervised Representation Learning via Neural Activation Coding", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10239", "id": "10239", "proceeding": "http://proceedings.mlr.press/v139/park21b.html", "slides": "/media/icml-2021/Slides/10239.pdf", "author_site": "Yookoon Park, Sangho Lee, Gunhee Kim, David Blei", "author": "Yookoon Park; Sangho Lee; Gunhee Kim; David Blei", "abstract": "We present neural activation coding (NAC) as a novel approach for learning deep representations from unlabeled data for downstream applications. We argue that the deep encoder should maximize its nonlinear expressivity on the data for downstream predictors to take full advantage of its representation power. To this end, NAC maximizes the mutual information between activation patterns of the encoder and the data over a noisy communication channel. We show that learning for a noise-robust activation code increases the number of distinct linear regions of ReLU encoders, hence the maximum nonlinear expressivity. More interestingly, NAC learns both continuous and discrete representations of data, which we respectively evaluate on two downstream tasks: (i) linear classification on CIFAR-10 and ImageNet-1K and (ii) nearest neighbor retrieval on CIFAR-10 and FLICKR-25K. Empirical results show that NAC attains better or comparable performance on both tasks over recent baselines including SimCLR and DistillHash. In addition, NAC pretraining provides significant benefits to the training of deep generative models. 
Our code is available at https://github.com/yookoon/nac.", "bibtex": "@InProceedings{pmlr-v139-park21b,\n title = \t {Unsupervised Representation Learning via Neural Activation Coding},\n author = {Park, Yookoon and Lee, Sangho and Kim, Gunhee and Blei, David},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8391--8400},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/park21b/park21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/park21b.html},\n abstract = \t {We present neural activation coding (NAC) as a novel approach for learning deep representations from unlabeled data for downstream applications. We argue that the deep encoder should maximize its nonlinear expressivity on the data for downstream predictors to take full advantage of its representation power. To this end, NAC maximizes the mutual information between activation patterns of the encoder and the data over a noisy communication channel. We show that learning for a noise-robust activation code increases the number of distinct linear regions of ReLU encoders, hence the maximum nonlinear expressivity. More interestingly, NAC learns both continuous and discrete representations of data, which we respectively evaluate on two downstream tasks: (i) linear classification on CIFAR-10 and ImageNet-1K and (ii) nearest neighbor retrieval on CIFAR-10 and FLICKR-25K. Empirical results show that NAC attains better or comparable performance on both tasks over recent baselines including SimCLR and DistillHash. In addition, NAC pretraining provides significant benefits to the training of deep generative models. 
Our code is available at https://github.com/yookoon/nac.}\n}", "pdf": "http://proceedings.mlr.press/v139/park21b/park21b.pdf", "supp": "", "pdf_size": 699182, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3527585526812184622&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Computer Science Department, Columbia University, New York, USA; Department of Computer Science and Engineering, Seoul National University, Seoul, South Korea; Department of Computer Science and Engineering, Seoul National University, Seoul, South Korea; Computer Science Department, Columbia University, New York, USA", "aff_domain": "columbia.edu; ; ; ", "email": "columbia.edu; ; ; ", "github": "https://github.com/yookoon/nac", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/park21b.html", "aff_unique_index": "0;1;1;0", "aff_unique_norm": "Columbia University;Seoul National University", "aff_unique_dep": "Computer Science Department;Department of Computer Science and Engineering", "aff_unique_url": "https://www.columbia.edu;https://www.snu.ac.kr", "aff_unique_abbr": "Columbia;SNU", "aff_campus_unique_index": "0;1;1;0", "aff_campus_unique": "New York;Seoul", "aff_country_unique_index": "0;1;1;0", "aff_country_unique": "United States;South Korea" }, { "title": "Unsupervised Skill Discovery with Bottleneck Option Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8531", "id": "8531", "proceeding": "http://proceedings.mlr.press/v139/kim21j.html", "slides": "", "author_site": "Jaekyeom Kim, Seohong Park, Gunhee Kim", "author": "Jaekyeom Kim; Seohong Park; Gunhee Kim", "abstract": "Having the ability to acquire inherent skills from environments without any external rewards or supervision like humans is an important problem. We propose a novel unsupervised skill discovery method named Information Bottleneck Option Learning (IBOL). On top of the linearization of environments that promotes more various and distant state transitions, IBOL enables the discovery of diverse skills. It provides the abstraction of the skills learned with the information bottleneck framework for the options with improved stability and encouraged disentanglement. We empirically demonstrate that IBOL outperforms multiple state-of-the-art unsupervised skill discovery methods on the information-theoretic evaluations and downstream tasks in MuJoCo environments, including Ant, HalfCheetah, Hopper and D\u2019Kitty. Our code is available at https://vision.snu.ac.kr/projects/ibol.", "bibtex": "@InProceedings{pmlr-v139-kim21j,\n title = \t {Unsupervised Skill Discovery with Bottleneck Option Learning},\n author = {Kim, Jaekyeom and Park, Seohong and Kim, Gunhee},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5572--5582},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kim21j/kim21j.pdf},\n url = \t {https://proceedings.mlr.press/v139/kim21j.html},\n abstract = \t {Having the ability to acquire inherent skills from environments without any external rewards or supervision like humans is an important problem. We propose a novel unsupervised skill discovery method named Information Bottleneck Option Learning (IBOL). 
On top of the linearization of environments that promotes more various and distant state transitions, IBOL enables the discovery of diverse skills. It provides the abstraction of the skills learned with the information bottleneck framework for the options with improved stability and encouraged disentanglement. We empirically demonstrate that IBOL outperforms multiple state-of-the-art unsupervised skill discovery methods on the information-theoretic evaluations and downstream tasks in MuJoCo environments, including Ant, HalfCheetah, Hopper and D\u2019Kitty. Our code is available at https://vision.snu.ac.kr/projects/ibol.}\n}", "pdf": "http://proceedings.mlr.press/v139/kim21j/kim21j.pdf", "supp": "", "pdf_size": 8490460, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2474291061858386960&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science and Engineering, Seoul National University, South Korea; Department of Computer Science and Engineering, Seoul National University, South Korea; Department of Computer Science and Engineering, Seoul National University, South Korea", "aff_domain": "snu.ac.kr;snu.ac.kr;snu.ac.kr", "email": "snu.ac.kr;snu.ac.kr;snu.ac.kr", "github": "", "project": "https://vision.snu.ac.kr/projects/ibol", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/kim21j.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Seoul National University", "aff_unique_dep": "Department of Computer Science and Engineering", "aff_unique_url": "https://www.snu.ac.kr", "aff_unique_abbr": "SNU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "South Korea" }, { "title": "Valid Causal Inference with (Some) Invalid Instruments", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10139", "id": "10139", "proceeding": "http://proceedings.mlr.press/v139/hartford21a.html", "slides": "/media/icml-2021/Slides/10139.pdf", "author_site": "Jason Hartford, Victor Veitch, Dhanya Sridhar, Kevin Leyton-Brown", "author": "Jason S Hartford; Victor Veitch; Dhanya Sridhar; Kevin Leyton-Brown", "abstract": "Instrumental variable methods provide a powerful approach to estimating causal effects in the presence of unobserved confounding. But a key challenge when applying them is the reliance on untestable \"exclusion\" assumptions that rule out any relationship between the instrument variable and the response that is not mediated by the treatment. In this paper, we show how to perform consistent IV estimation despite violations of the exclusion assumption. In particular, we show that when one has multiple candidate instruments, only a majority of these candidates\u2014or, more generally, the modal candidate-response relationship\u2014needs to be valid to estimate the causal effect. Our approach uses an estimate of the modal prediction from an ensemble of instrumental variable estimators. The technique is simple to apply and is \"black-box\" in the sense that it may be used with any instrumental variable estimator as long as the treatment effect is identified for each valid instrument independently. As such, it is compatible with recent machine-learning based estimators that allow for the estimation of conditional average treatment effects (CATE) on complex, high dimensional data. 
Experimentally, we achieve accurate estimates of conditional average treatment effects using an ensemble of deep network-based estimators, including on a challenging simulated Mendelian Randomization problem.", "bibtex": "@InProceedings{pmlr-v139-hartford21a,\n title = \t {Valid Causal Inference with (Some) Invalid Instruments},\n author = {Hartford, Jason S and Veitch, Victor and Sridhar, Dhanya and Leyton-Brown, Kevin},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4096--4106},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/hartford21a/hartford21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/hartford21a.html},\n abstract = \t {Instrumental variable methods provide a powerful approach to estimating causal effects in the presence of unobserved confounding. But a key challenge when applying them is the reliance on untestable \"exclusion\" assumptions that rule out any relationship between the instrument variable and the response that is not mediated by the treatment. In this paper, we show how to perform consistent IV estimation despite violations of the exclusion assumption. In particular, we show that when one has multiple candidate instruments, only a majority of these candidates\u2014or, more generally, the modal candidate-response relationship\u2014needs to be valid to estimate the causal effect. Our approach uses an estimate of the modal prediction from an ensemble of instrumental variable estimators. The technique is simple to apply and is \"black-box\" in the sense that it may be used with any instrumental variable estimator as long as the treatment effect is identified for each valid instrument independently. As such, it is compatible with recent machine-learning based estimators that allow for the estimation of conditional average treatment effects (CATE) on complex, high dimensional data. 
Experimentally, we achieve accurate estimates of conditional average treatment effects using an ensemble of deep network-based estimators, including on a challenging simulated Mendelian Randomization problem.}\n}", "pdf": "http://proceedings.mlr.press/v139/hartford21a/hartford21a.pdf", "supp": "", "pdf_size": 2367817, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11913541440371630140&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "University of British Columbia; University of Chicago; Columbia University; University of British Columbia", "aff_domain": "cs.ubc.ca; ; ; ", "email": "cs.ubc.ca; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/hartford21a.html", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "University of British Columbia;University of Chicago;Columbia University", "aff_unique_dep": ";;", "aff_unique_url": "https://www.ubc.ca;https://www.uchicago.edu;https://www.columbia.edu", "aff_unique_abbr": "UBC;UChicago;Columbia", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;1;0", "aff_country_unique": "Canada;United States" }, { "title": "Value Alignment Verification", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9547", "id": "9547", "proceeding": "http://proceedings.mlr.press/v139/brown21a.html", "slides": "", "author_site": "Daniel Brown, Jordan Schneider, Anca Dragan, Scott Niekum", "author": "Daniel S Brown; Jordan Schneider; Anca Dragan; Scott Niekum", "abstract": "As humans interact with autonomous agents to perform increasingly complicated, potentially risky tasks, it is important to be able to efficiently evaluate an agent\u2019s performance and correctness. In this paper we formalize and theoretically analyze the problem of efficient value alignment verification: how to efficiently test whether the behavior of another agent is aligned with a human\u2019s values? The goal is to construct a kind of \"driver\u2019s test\" that a human can give to any agent which will verify value alignment via a minimal number of queries. We study alignment verification problems with both idealized humans that have an explicit reward function as well as problems where they have implicit values. We analyze verification of exact value alignment for rational agents, propose and test heuristics for value alignment verification in gridworlds and a continuous autonomous driving domain, and prove that there exist sufficient conditions such that we can verify epsilon-alignment in any environment via a constant-query-complexity alignment test.", "bibtex": "@InProceedings{pmlr-v139-brown21a,\n title = \t {Value Alignment Verification},\n author = {Brown, Daniel S and Schneider, Jordan and Dragan, Anca and Niekum, Scott},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1105--1115},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/brown21a/brown21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/brown21a.html},\n abstract = \t {As humans interact with autonomous agents to perform increasingly complicated, potentially risky tasks, it is important to be able to efficiently evaluate an agent\u2019s performance and correctness. 
In this paper we formalize and theoretically analyze the problem of efficient value alignment verification: how to efficiently test whether the behavior of another agent is aligned with a human\u2019s values? The goal is to construct a kind of \"driver\u2019s test\" that a human can give to any agent which will verify value alignment via a minimal number of queries. We study alignment verification problems with both idealized humans that have an explicit reward function as well as problems where they have implicit values. We analyze verification of exact value alignment for rational agents, propose and test heuristics for value alignment verification in gridworlds and a continuous autonomous driving domain, and prove that there exist sufficient conditions such that we can verify epsilon-alignment in any environment via a constant-query-complexity alignment test.}\n}", "pdf": "http://proceedings.mlr.press/v139/brown21a/brown21a.pdf", "supp": "", "pdf_size": 872569, "gs_citation": 51, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5318002618951129429&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "University of California, Berkeley, USA+1; University of Texas at Austin, USA+2; University of California, Berkeley, USA; University of Texas at Austin, USA", "aff_domain": "berkeley.edu;cs.utexas.edu; ; ", "email": "berkeley.edu;cs.utexas.edu; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/brown21a.html", "aff_unique_index": "0;2;0;2", "aff_unique_norm": "University of California, Berkeley;;University of Texas at Austin", "aff_unique_dep": ";;", "aff_unique_url": "https://www.berkeley.edu;;https://www.utexas.edu", "aff_unique_abbr": "UC Berkeley;;UT Austin", "aff_campus_unique_index": "0;2;0;2", "aff_campus_unique": "Berkeley;;Austin", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States;" }, { "title": "Value Iteration in Continuous Actions, States and Time", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9345", "id": "9345", "proceeding": "http://proceedings.mlr.press/v139/lutter21a.html", "slides": "", "author_site": "Michael Lutter, Shie Mannor, Jan Peters, Dieter Fox, Animesh Garg", "author": "Michael Lutter; Shie Mannor; Jan Peters; Dieter Fox; Animesh Garg", "abstract": "Classical value iteration approaches are not applicable to environments with continuous states and actions. For such environments the states and actions must be discretized, which leads to an exponential increase in computational complexity. In this paper, we propose continuous fitted value iteration (cFVI). This algorithm enables dynamic programming for continuous states and actions with a known dynamics model. Exploiting the continuous time formulation, the optimal policy can be derived for non-linear control-affine dynamics. This closed-form solution enables the efficient extension of value iteration to continuous environments. 
We show in non-linear control experiments that the dynamic programming solution obtains the same quantitative performance as deep reinforcement learning methods in simulation but excels when transferred to the physical system. The policy obtained by cFVI is more robust to changes in the dynamics despite using only a deterministic model and without explicitly incorporating robustness in the optimization.", "bibtex": "@InProceedings{pmlr-v139-lutter21a,\n title = \t {Value Iteration in Continuous Actions, States and Time},\n author = {Lutter, Michael and Mannor, Shie and Peters, Jan and Fox, Dieter and Garg, Animesh},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7224--7234},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lutter21a/lutter21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/lutter21a.html},\n abstract = \t {Classical value iteration approaches are not applicable to environments with continuous states and actions. For such environments the states and actions must be discretized, which leads to an exponential increase in computational complexity. In this paper, we propose continuous fitted value iteration (cFVI). This algorithm enables dynamic programming for continuous states and actions with a known dynamics model. Exploiting the continuous time formulation, the optimal policy can be derived for non-linear control-affine dynamics. This closed-form solution enables the efficient extension of value iteration to continuous environments. We show in non-linear control experiments that the dynamic programming solution obtains the same quantitative performance as deep reinforcement learning methods in simulation but excels when transferred to the physical system. The policy obtained by cFVI is more robust to changes in the dynamics despite using only a deterministic model and without explicitly incorporating robustness in the optimization.}\n}", "pdf": "http://proceedings.mlr.press/v139/lutter21a/lutter21a.pdf", "supp": "", "pdf_size": 8660781, "gs_citation": 50, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16848657752177903230&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "NVIDIA; Technical University of Darmstadt; Technion, Israel Institute of Technology; University of Washington; University of Toronto & Vector Institute", "aff_domain": "robot-learning.de; ; ; ; ", "email": "robot-learning.de; ; ; ; ", "github": "", "project": "https://sites.google.com/view/value-iteration", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/lutter21a.html", "aff_unique_index": "0;1;2;3;4", "aff_unique_norm": "NVIDIA;Technical University of Darmstadt;Israel Institute of Technology;University of Washington;University of Toronto", "aff_unique_dep": "NVIDIA Corporation;;;;", "aff_unique_url": "https://www.nvidia.com;https://www.tu-darmstadt.de;https://www.technion.ac.il/en/;https://www.washington.edu;https://www.utoronto.ca", "aff_unique_abbr": "NVIDIA;TUD;Technion;UW;U of T", "aff_campus_unique_index": "1", "aff_campus_unique": ";Toronto", "aff_country_unique_index": "0;1;2;0;3", "aff_country_unique": "United States;Germany;Israel;Canada" }, { "title": "Value-at-Risk Optimization with Gaussian Processes", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8793", "id": "8793", 
"proceeding": "http://proceedings.mlr.press/v139/nguyen21b.html", "slides": "", "author_site": "Quoc Phong Nguyen, Zhongxiang Dai, Bryan Kian Hsiang Low, Patrick Jaillet", "author": "Quoc Phong Nguyen; Zhongxiang Dai; Bryan Kian Hsiang Low; Patrick Jaillet", "abstract": "Value-at-risk (VaR) is an established measure to assess risks in critical real-world applications with random environmental factors. This paper presents a novel VaR upper confidence bound (V-UCB) algorithm for maximizing the VaR of a black-box objective function with the first no-regret guarantee. To realize this, we first derive a confidence bound of VaR and then prove the existence of values of the environmental random variable (to be selected to achieve no regret) such that the confidence bound of VaR lies within that of the objective function evaluated at such values. Our V-UCB algorithm empirically demonstrates state-of-the-art performance in optimizing synthetic benchmark functions, a portfolio optimization problem, and a simulated robot task.", "bibtex": "@InProceedings{pmlr-v139-nguyen21b,\n title = \t {Value-at-Risk Optimization with Gaussian Processes},\n author = {Nguyen, Quoc Phong and Dai, Zhongxiang and Low, Bryan Kian Hsiang and Jaillet, Patrick},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8063--8072},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/nguyen21b/nguyen21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/nguyen21b.html},\n abstract = \t {Value-at-risk (VaR) is an established measure to assess risks in critical real-world applications with random environmental factors. This paper presents a novel VaR upper confidence bound (V-UCB) algorithm for maximizing the VaR of a black-box objective function with the first no-regret guarantee. To realize this, we first derive a confidence bound of VaR and then prove the existence of values of the environmental random variable (to be selected to achieve no regret) such that the confidence bound of VaR lies within that of the objective function evaluated at such values. 
Our V-UCB algorithm empirically demonstrates state-of-the-art performance in optimizing synthetic benchmark functions, a portfolio optimization problem, and a simulated robot task.}\n}", "pdf": "http://proceedings.mlr.press/v139/nguyen21b/nguyen21b.pdf", "supp": "", "pdf_size": 1343182, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16988502914491538668&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Department of Computer Science, National University of Singapore, Republic of Singapore; Department of Computer Science, National University of Singapore, Republic of Singapore; Department of Computer Science, National University of Singapore, Republic of Singapore; Department of Electrical Engineering and Computer Science, Massachusetts Institute of Technology, USA", "aff_domain": "comp.nus.edu.sg; ; ; ", "email": "comp.nus.edu.sg; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/nguyen21b.html", "aff_unique_index": "0;0;0;1", "aff_unique_norm": "National University of Singapore;Massachusetts Institute of Technology", "aff_unique_dep": "Department of Computer Science;Department of Electrical Engineering and Computer Science", "aff_unique_url": "https://www.nus.edu.sg;https://web.mit.edu", "aff_unique_abbr": "NUS;MIT", "aff_campus_unique_index": "1", "aff_campus_unique": ";Cambridge", "aff_country_unique_index": "0;0;0;1", "aff_country_unique": "Singapore;United States" }, { "title": "Variance Reduced Training with Stratified Sampling for Forecasting Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8895", "id": "8895", "proceeding": "http://proceedings.mlr.press/v139/lu21d.html", "slides": "", "author_site": "Yucheng Lu, Youngsuk Park, Lifan Chen, Yuyang Wang, Christopher De Sa, Dean Foster", "author": "Yucheng Lu; Youngsuk Park; Lifan Chen; Yuyang Wang; Christopher De Sa; Dean Foster", "abstract": "In large-scale time series forecasting, one often encounters the situation where the temporal patterns of time series, while drifting over time, differ from one another in the same dataset. In this paper, we provably show under such heterogeneity, training a forecasting model with commonly used stochastic optimizers (e.g. SGD) potentially suffers large variance on gradient estimation, and thus incurs long-time training. We show that this issue can be efficiently alleviated via stratification, which allows the optimizer to sample from pre-grouped time series strata. For better trading-off gradient variance and computation complexity, we further propose SCott (Stochastic Stratified Control Variate Gradient Descent), a variance reduced SGD-style optimizer that utilizes stratified sampling via control variate. In theory, we provide the convergence guarantee of SCott on smooth non-convex objectives. 
Empirically, we evaluate SCott and other baseline optimizers on both synthetic and real-world time series forecasting problems, and demonstrate SCott converges faster with respect to both iterations and wall clock time.", "bibtex": "@InProceedings{pmlr-v139-lu21d,\n title = \t {Variance Reduced Training with Stratified Sampling for Forecasting Models},\n author = {Lu, Yucheng and Park, Youngsuk and Chen, Lifan and Wang, Yuyang and De Sa, Christopher and Foster, Dean},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {7145--7155},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/lu21d/lu21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/lu21d.html},\n abstract = \t {In large-scale time series forecasting, one often encounters the situation where the temporal patterns of time series, while drifting over time, differ from one another in the same dataset. In this paper, we provably show under such heterogeneity, training a forecasting model with commonly used stochastic optimizers (e.g. SGD) potentially suffers large variance on gradient estimation, and thus incurs long-time training. We show that this issue can be efficiently alleviated via stratification, which allows the optimizer to sample from pre-grouped time series strata. For better trading-off gradient variance and computation complexity, we further propose SCott (Stochastic Stratified Control Variate Gradient Descent), a variance reduced SGD-style optimizer that utilizes stratified sampling via control variate. In theory, we provide the convergence guarantee of SCott on smooth non-convex objectives. Empirically, we evaluate SCott and other baseline optimizers on both synthetic and real-world time series forecasting problems, and demonstrate SCott converges faster with respect to both iterations and wall clock time.}\n}", "pdf": "http://proceedings.mlr.press/v139/lu21d/lu21d.pdf", "supp": "", "pdf_size": 711890, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11989475374929010676&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": ";;;;;", "aff_domain": ";;;;;", "email": ";;;;;", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/lu21d.html" }, { "title": "Variance Reduction via Primal-Dual Accelerated Dual Averaging for Nonsmooth Convex Finite-Sums", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8865", "id": "8865", "proceeding": "http://proceedings.mlr.press/v139/song21d.html", "slides": "", "author_site": "Chaobing Song, Stephen Wright, Jelena Diakonikolas", "author": "Chaobing Song; Stephen J Wright; Jelena Diakonikolas", "abstract": "Structured nonsmooth convex finite-sum optimization appears in many machine learning applications, including support vector machines and least absolute deviation. For the primal-dual formulation of this problem, we propose a novel algorithm called \\emph{Variance Reduction via Primal-Dual Accelerated Dual Averaging (\\vrpda)}. In the nonsmooth and general convex setting, \\vrpda\u00a0has the overall complexity $O(nd\\log\\min \\{1/\\epsilon, n\\} + d/\\epsilon )$ in terms of the primal-dual gap, where $n$ denotes the number of samples, $d$ the dimension of the primal variables, and $\\epsilon$ the desired accuracy. 
In the nonsmooth and strongly convex setting, the overall complexity of \\vrpda\u00a0becomes $O(nd\\log\\min\\{1/\\epsilon, n\\} + d/\\sqrt{\\epsilon})$ in terms of both the primal-dual gap and the distance between iterate and optimal solution. Both these results for \\vrpda\u00a0improve significantly on state-of-the-art complexity estimates\u2014which are $O(nd\\log \\min\\{1/\\epsilon, n\\} + \\sqrt{n}d/\\epsilon)$ for the nonsmooth and general convex setting and $O(nd\\log \\min\\{1/\\epsilon, n\\} + \\sqrt{n}d/\\sqrt{\\epsilon})$ for the nonsmooth and strongly convex setting\u2014with a simpler and more straightforward algorithm and analysis. Moreover, both complexities are better than \\emph{lower} bounds for general convex finite-sum optimization, because our approach makes use of additional, commonly occurring structure. Numerical experiments reveal competitive performance of \\vrpda\u00a0compared to state-of-the-art approaches.", "bibtex": "@InProceedings{pmlr-v139-song21d,\n title = \t {Variance Reduction via Primal-Dual Accelerated Dual Averaging for Nonsmooth Convex Finite-Sums},\n author = {Song, Chaobing and Wright, Stephen J and Diakonikolas, Jelena},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9824--9834},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/song21d/song21d.pdf},\n url = \t {https://proceedings.mlr.press/v139/song21d.html},\n abstract = \t {Structured nonsmooth convex finite-sum optimization appears in many machine learning applications, including support vector machines and least absolute deviation. For the primal-dual formulation of this problem, we propose a novel algorithm called \\emph{Variance Reduction via Primal-Dual Accelerated Dual Averaging (\\vrpda)}. In the nonsmooth and general convex setting, \\vrpda\u00a0has the overall complexity $O(nd\\log\\min \\{1/\\epsilon, n\\} + d/\\epsilon )$ in terms of the primal-dual gap, where $n$ denotes the number of samples, $d$ the dimension of the primal variables, and $\\epsilon$ the desired accuracy. In the nonsmooth and strongly convex setting, the overall complexity of \\vrpda\u00a0becomes $O(nd\\log\\min\\{1/\\epsilon, n\\} + d/\\sqrt{\\epsilon})$ in terms of both the primal-dual gap and the distance between iterate and optimal solution. Both these results for \\vrpda\u00a0improve significantly on state-of-the-art complexity estimates\u2014which are $O(nd\\log \\min\\{1/\\epsilon, n\\} + \\sqrt{n}d/\\epsilon)$ for the nonsmooth and general convex setting and $O(nd\\log \\min\\{1/\\epsilon, n\\} + \\sqrt{n}d/\\sqrt{\\epsilon})$ for the nonsmooth and strongly convex setting\u2014with a simpler and more straightforward algorithm and analysis. Moreover, both complexities are better than \\emph{lower} bounds for general convex finite-sum optimization, because our approach makes use of additional, commonly occurring structure. 
Numerical experiments reveal competitive performance of \\vrpda\u00a0compared to state-of-the-art approaches.}\n}", "pdf": "http://proceedings.mlr.press/v139/song21d/song21d.pdf", "supp": "", "pdf_size": 1676436, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17309321418589425219&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Sciences, University of Wisconsin-Madison; Department of Computer Sciences, University of Wisconsin-Madison; Department of Computer Sciences, University of Wisconsin-Madison", "aff_domain": "wisc.edu; ; ", "email": "wisc.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/song21d.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Wisconsin-Madison", "aff_unique_dep": "Department of Computer Sciences", "aff_unique_url": "https://www.wisc.edu", "aff_unique_abbr": "UW-Madison", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Madison", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Variational (Gradient) Estimate of the Score Function in Energy-based Latent Variable Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8663", "id": "8663", "proceeding": "http://proceedings.mlr.press/v139/bao21b.html", "slides": "", "author_site": "Fan Bao, Kun Xu, Chongxuan Li, Lanqing Hong, Jun Zhu, Bo Zhang", "author": "Fan Bao; Kun Xu; Chongxuan Li; Lanqing Hong; Jun Zhu; Bo Zhang", "abstract": "This paper presents new estimates of the score function and its gradient with respect to the model parameters in a general energy-based latent variable model (EBLVM). The score function and its gradient can be expressed as combinations of expectation and covariance terms over the (generally intractable) posterior of the latent variables. New estimates are obtained by introducing a variational posterior to approximate the true posterior in these terms. The variational posterior is trained to minimize a certain divergence (e.g., the KL divergence) between itself and the true posterior. Theoretically, the divergence characterizes upper bounds of the bias of the estimates. In principle, our estimates can be applied to a wide range of objectives, including kernelized Stein discrepancy (KSD), score matching (SM)-based methods and exact Fisher divergence with a minimal model assumption. In particular, these estimates applied to SM-based methods outperform existing methods in learning EBLVMs on several image datasets.", "bibtex": "@InProceedings{pmlr-v139-bao21b,\n title = \t {Variational (Gradient) Estimate of the Score Function in Energy-based Latent Variable Models},\n author = {Bao, Fan and Xu, Kun and Li, Chongxuan and Hong, Lanqing and Zhu, Jun and Zhang, Bo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {651--661},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bao21b/bao21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/bao21b.html},\n abstract = \t {This paper presents new estimates of the score function and its gradient with respect to the model parameters in a general energy-based latent variable model (EBLVM). 
The score function and its gradient can be expressed as combinations of expectation and covariance terms over the (generally intractable) posterior of the latent variables. New estimates are obtained by introducing a variational posterior to approximate the true posterior in these terms. The variational posterior is trained to minimize a certain divergence (e.g., the KL divergence) between itself and the true posterior. Theoretically, the divergence characterizes upper bounds of the bias of the estimates. In principle, our estimates can be applied to a wide range of objectives, including kernelized Stein discrepancy (KSD), score matching (SM)-based methods and exact Fisher divergence with a minimal model assumption. In particular, these estimates applied to SM-based methods outperform existing methods in learning EBLVMs on several image datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/bao21b/bao21b.pdf", "supp": "", "pdf_size": 2391679, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17355652803431034105&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Dept. of Comp. Sci. & Tech., Institute for AI, THBI Lab, BNRist Center, State Key Lab for Intell. Tech. & Sys., Tsinghua University, Beijing, China+Huawei Noah\u2019s Ark Lab; Dept. of Comp. Sci. & Tech., Institute for AI, THBI Lab, BNRist Center, State Key Lab for Intell. Tech. & Sys., Tsinghua University, Beijing, China; Dept. of Comp. Sci. & Tech., Institute for AI, THBI Lab, BNRist Center, State Key Lab for Intell. Tech. & Sys., Tsinghua University, Beijing, China; Huawei Noah\u2019s Ark Lab; Dept. of Comp. Sci. & Tech., Institute for AI, THBI Lab, BNRist Center, State Key Lab for Intell. Tech. & Sys., Tsinghua University, Beijing, China; Dept. of Comp. Sci. & Tech., Institute for AI, THBI Lab, BNRist Center, State Key Lab for Intell. Tech. & Sys., Tsinghua University, Beijing, China", "aff_domain": "tsinghua.edu.cn; ; ; ;tsinghua.edu.cn; ", "email": "tsinghua.edu.cn; ; ; ;tsinghua.edu.cn; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/bao21b.html", "aff_unique_index": "0+1;0;0;1;0;0", "aff_unique_norm": "Tsinghua University;Huawei", "aff_unique_dep": "Dept. of Comp. Sci. & Tech.;Noah\u2019s Ark Lab", "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.huawei.com", "aff_unique_abbr": "THU;Huawei", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Beijing;", "aff_country_unique_index": "0+0;0;0;0;0;0", "aff_country_unique": "China" }, { "title": "Variational Auto-Regressive Gaussian Processes for Continual Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9871", "id": "9871", "proceeding": "http://proceedings.mlr.press/v139/kapoor21b.html", "slides": "/media/icml-2021/Slides/9871.pdf", "author_site": "Sanyam Kapoor, Theofanis Karaletsos, Thang Bui", "author": "Sanyam Kapoor; Theofanis Karaletsos; Thang D Bui", "abstract": "Through sequential construction of posteriors on observing data online, Bayes\u2019 theorem provides a natural framework for continual learning. We develop Variational Auto-Regressive Gaussian Processes (VAR-GPs), a principled posterior updating mechanism to solve sequential tasks in continual learning. 
By relying on sparse inducing point approximations for scalable posteriors, we propose a novel auto-regressive variational distribution which reveals two fruitful connections to existing results in Bayesian inference, expectation propagation and orthogonal inducing points. Mean predictive entropy estimates show VAR-GPs prevent catastrophic forgetting, which is empirically supported by strong performance on modern continual learning benchmarks against competitive baselines. A thorough ablation study demonstrates the efficacy of our modeling choices.", "bibtex": "@InProceedings{pmlr-v139-kapoor21b,\n title = \t {Variational Auto-Regressive Gaussian Processes for Continual Learning},\n author = {Kapoor, Sanyam and Karaletsos, Theofanis and Bui, Thang D},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5290--5300},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kapoor21b/kapoor21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/kapoor21b.html},\n abstract = \t {Through sequential construction of posteriors on observing data online, Bayes\u2019 theorem provides a natural framework for continual learning. We develop Variational Auto-Regressive Gaussian Processes (VAR-GPs), a principled posterior updating mechanism to solve sequential tasks in continual learning. By relying on sparse inducing point approximations for scalable posteriors, we propose a novel auto-regressive variational distribution which reveals two fruitful connections to existing results in Bayesian inference, expectation propagation and orthogonal inducing points. Mean predictive entropy estimates show VAR-GPs prevent catastrophic forgetting, which is empirically supported by strong performance on modern continual learning benchmarks against competitive baselines. 
A thorough ablation study demonstrates the efficacy of our modeling choices.}\n}", "pdf": "http://proceedings.mlr.press/v139/kapoor21b/kapoor21b.pdf", "supp": "", "pdf_size": 1753682, "gs_citation": 36, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11399430121097777886&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Center for Data Science, New York University, New York, NY, USA; Facebook Inc., Menlo Park, CA, USA; University of Sydney, Sydney, NSW, Australia", "aff_domain": "nyu.edu; ; ", "email": "nyu.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/kapoor21b.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "New York University;Meta;University of Sydney", "aff_unique_dep": "Center for Data Science;Facebook Inc.;", "aff_unique_url": "https://www.nyu.edu;https://www.facebook.com;https://www.sydney.edu.au", "aff_unique_abbr": "NYU;Facebook;USYD", "aff_campus_unique_index": "0;1;2", "aff_campus_unique": "New York;Menlo Park;Sydney", "aff_country_unique_index": "0;0;1", "aff_country_unique": "United States;Australia" }, { "title": "Variational Data Assimilation with a Learned Inverse Observation Operator", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9289", "id": "9289", "proceeding": "http://proceedings.mlr.press/v139/frerix21a.html", "slides": "", "author_site": "Thomas Frerix, Dmitrii Kochkov, Jamie Smith, Daniel Cremers, Michael Brenner, Stephan Hoyer", "author": "Thomas Frerix; Dmitrii Kochkov; Jamie Smith; Daniel Cremers; Michael Brenner; Stephan Hoyer", "abstract": "Variational data assimilation optimizes for an initial state of a dynamical system such that its evolution fits observational data. The physical model can subsequently be evolved into the future to make predictions. This principle is a cornerstone of large scale forecasting applications such as numerical weather prediction. As such, it is implemented in current operational systems of weather forecasting agencies across the globe. However, finding a good initial state poses a difficult optimization problem in part due to the non-invertible relationship between physical states and their corresponding observations. We learn a mapping from observational data to physical states and show how it can be used to improve optimizability. We employ this mapping in two ways: to better initialize the non-convex optimization problem, and to reformulate the objective function in better behaved physics space instead of observation space. 
Our experimental results for the Lorenz96 model and a two-dimensional turbulent fluid flow demonstrate that this procedure significantly improves forecast quality for chaotic systems.", "bibtex": "@InProceedings{pmlr-v139-frerix21a,\n title = \t {Variational Data Assimilation with a Learned Inverse Observation Operator},\n author = {Frerix, Thomas and Kochkov, Dmitrii and Smith, Jamie and Cremers, Daniel and Brenner, Michael and Hoyer, Stephan},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3449--3458},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/frerix21a/frerix21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/frerix21a.html},\n abstract = \t {Variational data assimilation optimizes for an initial state of a dynamical system such that its evolution fits observational data. The physical model can subsequently be evolved into the future to make predictions. This principle is a cornerstone of large scale forecasting applications such as numerical weather prediction. As such, it is implemented in current operational systems of weather forecasting agencies across the globe. However, finding a good initial state poses a difficult optimization problem in part due to the non-invertible relationship between physical states and their corresponding observations. We learn a mapping from observational data to physical states and show how it can be used to improve optimizability. We employ this mapping in two ways: to better initialize the non-convex optimization problem, and to reformulate the objective function in better behaved physics space instead of observation space. 
Our experimental results for the Lorenz96 model and a two-dimensional turbulent fluid flow demonstrate that this procedure significantly improves forecast quality for chaotic systems.}\n}", "pdf": "http://proceedings.mlr.press/v139/frerix21a/frerix21a.pdf", "supp": "", "pdf_size": 5974411, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9123657318704968381&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Google Research + Technical University of Munich; Google Research; Google Research; Technical University of Munich; Google Research + Harvard University; Google Research", "aff_domain": "tum.de; ; ; ;google.com; ", "email": "tum.de; ; ; ;google.com; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/frerix21a.html", "aff_unique_index": "0+1;0;0;1;0+2;0", "aff_unique_norm": "Google;Technical University of Munich;Harvard University", "aff_unique_dep": "Google Research;;", "aff_unique_url": "https://research.google;https://www.tum.de;https://www.harvard.edu", "aff_unique_abbr": "Google Research;TUM;Harvard", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Mountain View;", "aff_country_unique_index": "0+1;0;0;1;0+0;0", "aff_country_unique": "United States;Germany" }, { "title": "Variational Empowerment as Representation Learning for Goal-Conditioned Reinforcement Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10367", "id": "10367", "proceeding": "http://proceedings.mlr.press/v139/choi21b.html", "slides": "/media/icml-2021/Slides/10367.pdf", "author_site": "Jongwook Choi, Archit Sharma, Honglak Lee, Sergey Levine, Shixiang Gu", "author": "Jongwook Choi; Archit Sharma; Honglak Lee; Sergey Levine; Shixiang Shane Gu", "abstract": "Learning to reach goal states and learning diverse skills through mutual information maximization have been proposed as principled frameworks for unsupervised reinforcement learning, allowing agents to acquire broadly applicable multi-task policies with minimal reward engineering. In this paper, we discuss how these two approaches {\u2014} goal-conditioned RL (GCRL) and MI-based RL {\u2014} can be generalized into a single family of methods, interpreting mutual information maximization and variational empowerment as representation learning methods that acquire functionally aware state representations for goal reaching. Starting from a simple observation that the standard GCRL is encapsulated by the optimization objective of variational empowerment, we can derive novel variants of GCRL and variational empowerment under a single, unified optimization objective, such as adaptive-variance GCRL and linear-mapping GCRL, and study the characteristics of representation learning each variant provides. Furthermore, through the lens of GCRL, we show that adapting powerful techniques from GCRL such as goal relabeling into the variational MI context as well as proper regularization on the variational posterior provides substantial gains in algorithm performance, and propose a novel evaluation metric named latent goal reaching (LGR) as an objective measure for evaluating empowerment algorithms akin to goal-based RL. 
Through principled mathematical derivations and careful experimental validations, our work lays a novel foundation from which representation learning can be evaluated and analyzed in goal-based RL", "bibtex": "@InProceedings{pmlr-v139-choi21b,\n title = \t {Variational Empowerment as Representation Learning for Goal-Conditioned Reinforcement Learning},\n author = {Choi, Jongwook and Sharma, Archit and Lee, Honglak and Levine, Sergey and Gu, Shixiang Shane},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1953--1963},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/choi21b/choi21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/choi21b.html},\n abstract = \t {Learning to reach goal states and learning diverse skills through mutual information maximization have been proposed as principled frameworks for unsupervised reinforcement learning, allowing agents to acquire broadly applicable multi-task policies with minimal reward engineering. In this paper, we discuss how these two approaches {\u2014} goal-conditioned RL (GCRL) and MI-based RL {\u2014} can be generalized into a single family of methods, interpreting mutual information maximization and variational empowerment as representation learning methods that acquire functionally aware state representations for goal reaching. Starting from a simple observation that the standard GCRL is encapsulated by the optimization objective of variational empowerment, we can derive novel variants of GCRL and variational empowerment under a single, unified optimization objective, such as adaptive-variance GCRL and linear-mapping GCRL, and study the characteristics of representation learning each variant provides. Furthermore, through the lens of GCRL, we show that adapting powerful techniques from GCRL such as goal relabeling into the variational MI context as well as proper regularization on the variational posterior provides substantial gains in algorithm performance, and propose a novel evaluation metric named latent goal reaching (LGR) as an objective measure for evaluating empowerment algorithms akin to goal-based RL. 
Through principled mathematical derivations and careful experimental validations, our work lays a novel foundation from which representation learning can be evaluated and analyzed in goal-based RL}\n}", "pdf": "http://proceedings.mlr.press/v139/choi21b/choi21b.pdf", "supp": "", "pdf_size": 4811341, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13960392295750910856&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 3, "aff": "University of Michigan+Google; Stanford University+Google; LG AI Research+University of Michigan; Google Research; University of California, Berkeley+Google", "aff_domain": "umich.edu; ; ;google.com; ", "email": "umich.edu; ; ;google.com; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/choi21b.html", "aff_unique_index": "0+1;2+1;3+0;1;4+1", "aff_unique_norm": "University of Michigan;Google;Stanford University;LG;University of California, Berkeley", "aff_unique_dep": ";Google;;LG AI Research;", "aff_unique_url": "https://www.umich.edu;https://www.google.com;https://www.stanford.edu;https://www.lgaires.com;https://www.berkeley.edu", "aff_unique_abbr": "UM;Google;Stanford;LG AI;UC Berkeley", "aff_campus_unique_index": "1;2+1;;1;3+1", "aff_campus_unique": ";Mountain View;Stanford;Berkeley", "aff_country_unique_index": "0+0;0+0;1+0;0;0+0", "aff_country_unique": "United States;South Korea" }, { "title": "Vector Quantized Models for Planning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9543", "id": "9543", "proceeding": "http://proceedings.mlr.press/v139/ozair21a.html", "slides": "", "author_site": "Sherjil Ozair, Yazhe Li, Ali Razavi, Ioannis Antonoglou, A\u00e4ron van den Oord, Oriol Vinyals", "author": "Sherjil Ozair; Yazhe Li; Ali Razavi; Ioannis Antonoglou; Aaron Van Den Oord; Oriol Vinyals", "abstract": "Recent developments in the field of model-based RL have proven successful in a range of environments, especially ones where planning is essential. However, such successes have been limited to deterministic fully-observed environments. We present a new approach that handles stochastic and partially-observable environments. Our key insight is to use discrete autoencoders to capture the multiple possible effects of an action in a stochastic environment. We use a stochastic variant of Monte Carlo tree search to plan over both the agent\u2019s actions and the discrete latent variables representing the environment\u2019s response. Our approach significantly outperforms an offline version of MuZero on a stochastic interpretation of chess where the opponent is considered part of the environment. 
We also show that our approach scales to DeepMind Lab, a first-person 3D environment with large visual observations and partial observability.", "bibtex": "@InProceedings{pmlr-v139-ozair21a,\n title = \t {Vector Quantized Models for Planning},\n author = {Ozair, Sherjil and Li, Yazhe and Razavi, Ali and Antonoglou, Ioannis and Van Den Oord, Aaron and Vinyals, Oriol},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8302--8313},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ozair21a/ozair21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ozair21a.html},\n abstract = \t {Recent developments in the field of model-based RL have proven successful in a range of environments, especially ones where planning is essential. However, such successes have been limited to deterministic fully-observed environments. We present a new approach that handles stochastic and partially-observable environments. Our key insight is to use discrete autoencoders to capture the multiple possible effects of an action in a stochastic environment. We use a stochastic variant of Monte Carlo tree search to plan over both the agent\u2019s actions and the discrete latent variables representing the environment\u2019s response. Our approach significantly outperforms an offline version of MuZero on a stochastic interpretation of chess where the opponent is considered part of the environment. We also show that our approach scales to DeepMind Lab, a first-person 3D environment with large visual observations and partial observability.}\n}", "pdf": "http://proceedings.mlr.press/v139/ozair21a/ozair21a.pdf", "supp": "", "pdf_size": 5226656, "gs_citation": 62, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18345327779019208584&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "DeepMind, London, United Kingdom+Mila, University of Montreal; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom", "aff_domain": "deepmind.com;deepmind.com; ; ; ; ", "email": "deepmind.com;deepmind.com; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/ozair21a.html", "aff_unique_index": "0+1;0;0;0;0;0", "aff_unique_norm": "DeepMind;University of Montreal", "aff_unique_dep": ";Mila", "aff_unique_url": "https://deepmind.com;https://www.mila.quebec", "aff_unique_abbr": "DeepMind;Mila", "aff_campus_unique_index": "0+1;0;0;0;0;0", "aff_campus_unique": "London;Montreal", "aff_country_unique_index": "0+1;0;0;0;0;0", "aff_country_unique": "United Kingdom;Canada" }, { "title": "Versatile Verification of Tree Ensembles", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9897", "id": "9897", "proceeding": "http://proceedings.mlr.press/v139/devos21a.html", "slides": "", "author_site": "Laurens Devos, Wannes Meert, Jesse Davis", "author": "Laurens Devos; Wannes Meert; Jesse Davis", "abstract": "Machine learned models often must abide by certain requirements (e.g., fairness or legal). This has spurred interested in developing approaches that can provably verify whether a model satisfies certain properties. 
This paper introduces a generic algorithm called Veritas that enables tackling multiple different verification tasks for tree ensemble models like random forests (RFs) and gradient boosted decision trees (GBDTs). This generality contrasts with previous work, which has focused exclusively on either adversarial example generation or robustness checking. Veritas formulates the verification task as a generic optimization problem and introduces a novel search space representation. Veritas offers two key advantages. First, it provides anytime lower and upper bounds when the optimization problem cannot be solved exactly. In contrast, many existing methods have focused on exact solutions and are thus limited by the verification problem being NP-complete. Second, Veritas produces full (bounded suboptimal) solutions that can be used to generate concrete examples. We experimentally show that our method produces state-of-the-art robustness estimates, especially when executed with strict time constraints. This is exceedingly important when checking the robustness of large datasets. Additionally, we show that Veritas enables tackling more real-world verification scenarios.", "bibtex": "@InProceedings{pmlr-v139-devos21a,\n title = \t {Versatile Verification of Tree Ensembles},\n author = {Devos, Laurens and Meert, Wannes and Davis, Jesse},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2654--2664},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/devos21a/devos21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/devos21a.html},\n abstract = \t {Machine learned models often must abide by certain requirements (e.g., fairness or legal). This has spurred interested in developing approaches that can provably verify whether a model satisfies certain properties. This paper introduces a generic algorithm called Veritas that enables tackling multiple different verification tasks for tree ensemble models like random forests (RFs) and gradient boosted decision trees (GBDTs). This generality contrasts with previous work, which has focused exclusively on either adversarial example generation or robustness checking. Veritas formulates the verification task as a generic optimization problem and introduces a novel search space representation. Veritas offers two key advantages. First, it provides anytime lower and upper bounds when the optimization problem cannot be solved exactly. In contrast, many existing methods have focused on exact solutions and are thus limited by the verification problem being NP-complete. Second, Veritas produces full (bounded suboptimal) solutions that can be used to generate concrete examples. We experimentally show that our method produces state-of-the-art robustness estimates, especially when executed with strict time constraints. This is exceedingly important when checking the robustness of large datasets. 
Additionally, we show that Veritas enables tackling more real-world verification scenarios.}\n}", "pdf": "http://proceedings.mlr.press/v139/devos21a/devos21a.pdf", "supp": "", "pdf_size": 748288, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16419931013195180348&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science, KU Leuven, Leuven, Belgium; Department of Computer Science, KU Leuven, Leuven, Belgium; Department of Computer Science, KU Leuven, Leuven, Belgium", "aff_domain": "kuleuven.be; ; ", "email": "kuleuven.be; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/devos21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "KU Leuven", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.kuleuven.be", "aff_unique_abbr": "KU Leuven", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Leuven", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Belgium" }, { "title": "ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9491", "id": "9491", "proceeding": "http://proceedings.mlr.press/v139/kim21k.html", "slides": "", "author_site": "Wonjae Kim, Bokyung Son, Ildoo Kim", "author": "Wonjae Kim; Bokyung Son; Ildoo Kim", "abstract": "Vision-and-Language Pre-training (VLP) has improved performance on various joint vision-and-language downstream tasks. Current approaches to VLP heavily rely on image feature extraction processes, most of which involve region supervision (e.g., object detection) and the convolutional architecture (e.g., ResNet). Although disregarded in the literature, we find it problematic in terms of both (1) efficiency/speed, that simply extracting input features requires much more computation than the multimodal interaction steps; and (2) expressive power, as it is upper bounded to the expressive power of the visual embedder and its predefined visual vocabulary. In this paper, we present a minimal VLP model, Vision-and-Language Transformer (ViLT), monolithic in the sense that the processing of visual inputs is drastically simplified to just the same convolution-free manner that we process textual inputs. We show that ViLT is up to tens of times faster than previous VLP models, yet with competitive or better downstream task performance. Our code and pre-trained weights are available at https://github.com/dandelin/vilt.", "bibtex": "@InProceedings{pmlr-v139-kim21k,\n title = \t {ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision},\n author = {Kim, Wonjae and Son, Bokyung and Kim, Ildoo},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5583--5594},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kim21k/kim21k.pdf},\n url = \t {https://proceedings.mlr.press/v139/kim21k.html},\n abstract = \t {Vision-and-Language Pre-training (VLP) has improved performance on various joint vision-and-language downstream tasks. Current approaches to VLP heavily rely on image feature extraction processes, most of which involve region supervision (e.g., object detection) and the convolutional architecture (e.g., ResNet). 
Although disregarded in the literature, we find it problematic in terms of both (1) efficiency/speed, that simply extracting input features requires much more computation than the multimodal interaction steps; and (2) expressive power, as it is upper bounded to the expressive power of the visual embedder and its predefined visual vocabulary. In this paper, we present a minimal VLP model, Vision-and-Language Transformer (ViLT), monolithic in the sense that the processing of visual inputs is drastically simplified to just the same convolution-free manner that we process textual inputs. We show that ViLT is up to tens of times faster than previous VLP models, yet with competitive or better downstream task performance. Our code and pre-trained weights are available at https://github.com/dandelin/vilt.}\n}", "pdf": "http://proceedings.mlr.press/v139/kim21k/kim21k.pdf", "supp": "", "pdf_size": 6329327, "gs_citation": 2071, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12987945369444025427&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Kakao Enterprise, Seongnam, Gyeonggi, Republic of Korea + NAVER AI Lab, Seongnam, Gyeonggi, Republic of Korea; Kakao Enterprise, Seongnam, Gyeonggi, Republic of Korea; Kakao Brain, Seongnam, Gyeonggi, Republic of Korea", "aff_domain": "navercorp.com; ; ", "email": "navercorp.com; ; ", "github": "https://github.com/dandelin/vilt", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/kim21k.html", "aff_unique_index": "0+1;0;2", "aff_unique_norm": "Kakao Enterprise;NAVER AI Lab;Kakao Brain", "aff_unique_dep": ";AI Lab;", "aff_unique_url": "https://www.kakaoenterprise.com;https://www.naver.com;https://brain.kakao.com", "aff_unique_abbr": "Kakao Enterprise;NAVER AI Lab;Kakao Brain", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Seongnam", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "South Korea" }, { "title": "Voice2Series: Reprogramming Acoustic Models for Time Series Classification", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9059", "id": "9059", "proceeding": "http://proceedings.mlr.press/v139/yang21j.html", "slides": "/media/icml-2021/Slides/9059.pdf", "author_site": "Huck Yang, Yun-Yun Tsai, Pin-Yu Chen", "author": "Chao-Han Huck Yang; Yun-Yun Tsai; Pin-Yu Chen", "abstract": "Learning to classify time series with limited data is a practical yet challenging problem. Current methods are primarily based on hand-designed feature extraction rules or domain-specific data augmentation. Motivated by the advances in deep speech processing models and the fact that voice data are univariate temporal signals, in this paper we propose Voice2Series (V2S), a novel end-to-end approach that reprograms acoustic models for time series classification, through input transformation learning and output label mapping. Leveraging the representation learning power of a large-scale pre-trained speech processing model, on 31 different time series tasks we show that V2S outperforms or is on par with state-of-the-art methods on 22 tasks, and improves their average accuracy by 1.72%. We further provide theoretical justification of V2S by proving its population risk is upper bounded by the source risk and a Wasserstein distance accounting for feature alignment via reprogramming. 
Our results offer new and effective means to time series classification.", "bibtex": "@InProceedings{pmlr-v139-yang21j,\n title = \t {Voice2Series: Reprogramming Acoustic Models for Time Series Classification},\n author = {Yang, Chao-Han Huck and Tsai, Yun-Yun and Chen, Pin-Yu},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11808--11819},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yang21j/yang21j.pdf},\n url = \t {https://proceedings.mlr.press/v139/yang21j.html},\n abstract = \t {Learning to classify time series with limited data is a practical yet challenging problem. Current methods are primarily based on hand-designed feature extraction rules or domain-specific data augmentation. Motivated by the advances in deep speech processing models and the fact that voice data are univariate temporal signals, in this paper we propose Voice2Series (V2S), a novel end-to-end approach that reprograms acoustic models for time series classification, through input transformation learning and output label mapping. Leveraging the representation learning power of a large-scale pre-trained speech processing model, on 31 different time series tasks we show that V2S outperforms or is on par with state-of-the-art methods on 22 tasks, and improves their average accuracy by 1.72%. We further provide theoretical justification of V2S by proving its population risk is upper bounded by the source risk and a Wasserstein distance accounting for feature alignment via reprogramming. Our results offer new and effective means to time series classification.}\n}", "pdf": "http://proceedings.mlr.press/v139/yang21j/yang21j.pdf", "supp": "", "pdf_size": 1584515, "gs_citation": 169, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=436573915483653789&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Georgia Institute of Technology; Columbia University; IBM Research", "aff_domain": "gatech.edu; ;ibm.com", "email": "gatech.edu; ;ibm.com", "github": "https://github.com/huckiyang/Voice2Series-Reprogramming", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/yang21j.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Georgia Institute of Technology;Columbia University;IBM", "aff_unique_dep": ";;IBM Research", "aff_unique_url": "https://www.gatech.edu;https://www.columbia.edu;https://www.ibm.com/research", "aff_unique_abbr": "Georgia Tech;Columbia;IBM", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "WGAN with an Infinitely Wide Generator Has No Spurious Stationary Points", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10233", "id": "10233", "proceeding": "http://proceedings.mlr.press/v139/no21a.html", "slides": "", "author_site": "Albert No, TaeHo Yoon, Sehyun Kwon, Ernest Ryu", "author": "Albert No; Taeho Yoon; Kwon Sehyun; Ernest K Ryu", "abstract": "Generative adversarial networks (GAN) are a widely used class of deep generative models, but their minimax training dynamics are not understood very well. In this work, we show that GANs with a 2-layer infinite-width generator and a 2-layer finite-width discriminator trained with stochastic gradient ascent-descent have no spurious stationary points. 
We then show that when the width of the generator is finite but wide, there are no spurious stationary points within a ball whose radius becomes arbitrarily large (to cover the entire parameter space) as the width goes to infinity.", "bibtex": "@InProceedings{pmlr-v139-no21a,\n title = \t {WGAN with an Infinitely Wide Generator Has No Spurious Stationary Points},\n author = {No, Albert and Yoon, Taeho and Sehyun, Kwon and Ryu, Ernest K},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8205--8215},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/no21a/no21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/no21a.html},\n abstract = \t {Generative adversarial networks (GAN) are a widely used class of deep generative models, but their minimax training dynamics are not understood very well. In this work, we show that GANs with a 2-layer infinite-width generator and a 2-layer finite-width discriminator trained with stochastic gradient ascent-descent have no spurious stationary points. We then show that when the width of the generator is finite but wide, there are no spurious stationary points within a ball whose radius becomes arbitrarily large (to cover the entire parameter space) as the width goes to infinity.}\n}", "pdf": "http://proceedings.mlr.press/v139/no21a/no21a.pdf", "supp": "", "pdf_size": 807746, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2540862355442244934&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Electronic and Electrical Engineering, Hongik University, Seoul, Korea; Department of Mathematical Sciences, Seoul National University, Seoul, Korea; Department of Mathematical Sciences, Seoul National University, Seoul, Korea; Department of Mathematical Sciences, Seoul National University, Seoul, Korea", "aff_domain": ";snu.ac.kr\";snu.ac.kr\";snu.ac.kr", "email": ";snu.ac.kr\";snu.ac.kr\";snu.ac.kr", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/no21a.html", "aff_unique_index": "0;1;1;1", "aff_unique_norm": "Hongik University;Seoul National University", "aff_unique_dep": "Department of Electronic and Electrical Engineering;Department of Mathematical Sciences", "aff_unique_url": "http://www.hongik.ac.kr;https://www.snu.ac.kr", "aff_unique_abbr": "Hongik;SNU", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Seoul", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "South Korea" }, { "title": "WILDS: A Benchmark of in-the-Wild Distribution Shifts", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10117", "id": "10117", "proceeding": "http://proceedings.mlr.press/v139/koh21a.html", "slides": "/media/icml-2021/Slides/10117_jpbP2Rf.pdf", "author_site": "Pang Wei Koh, Shiori Sagawa, Henrik Marklund, Sang Michael Xie, Marvin Zhang, Akshay Balsubramani, Weihua Hu, Michihiro Yasunaga, Richard Lanas Phillips, Irena Gao, Tony Lee, Etienne David, Ian Stavness, Wei Guo, Berton Earnshaw, Imran Haque, Sara Beery, Jure Leskovec, Anshul Kundaje, Emma Pierson, Sergey Levine, Chelsea Finn, Percy Liang", "author": "Pang Wei Koh; Shiori Sagawa; Henrik Marklund; Sang Michael Xie; Marvin Zhang; Akshay Balsubramani; Weihua Hu; Michihiro Yasunaga; Richard Lanas Phillips; Irena Gao; Tony Lee; Etienne 
David; Ian Stavness; Wei Guo; Berton Earnshaw; Imran Haque; Sara M Beery; Jure Leskovec; Anshul Kundaje; Emma Pierson; Sergey Levine; Chelsea Finn; Percy Liang", "abstract": "Distribution shifts\u2014where the training distribution differs from the test distribution\u2014can substantially degrade the accuracy of machine learning (ML) systems deployed in the wild. Despite their ubiquity in the real-world deployments, these distribution shifts are under-represented in the datasets widely used in the ML community today. To address this gap, we present WILDS, a curated benchmark of 10 datasets reflecting a diverse range of distribution shifts that naturally arise in real-world applications, such as shifts across hospitals for tumor identification; across camera traps for wildlife monitoring; and across time and location in satellite imaging and poverty mapping. On each dataset, we show that standard training yields substantially lower out-of-distribution than in-distribution performance. This gap remains even with models trained by existing methods for tackling distribution shifts, underscoring the need for new methods for training models that are more robust to the types of distribution shifts that arise in practice. To facilitate method development, we provide an open-source package that automates dataset loading, contains default model architectures and hyperparameters, and standardizes evaluations. The full paper, code, and leaderboards are available at https://wilds.stanford.edu.", "bibtex": "@InProceedings{pmlr-v139-koh21a,\n title = \t {WILDS: A Benchmark of in-the-Wild Distribution Shifts},\n author = {Koh, Pang Wei and Sagawa, Shiori and Marklund, Henrik and Xie, Sang Michael and Zhang, Marvin and Balsubramani, Akshay and Hu, Weihua and Yasunaga, Michihiro and Phillips, Richard Lanas and Gao, Irena and Lee, Tony and David, Etienne and Stavness, Ian and Guo, Wei and Earnshaw, Berton and Haque, Imran and Beery, Sara M and Leskovec, Jure and Kundaje, Anshul and Pierson, Emma and Levine, Sergey and Finn, Chelsea and Liang, Percy},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5637--5664},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/koh21a/koh21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/koh21a.html},\n abstract = \t {Distribution shifts\u2014where the training distribution differs from the test distribution\u2014can substantially degrade the accuracy of machine learning (ML) systems deployed in the wild. Despite their ubiquity in the real-world deployments, these distribution shifts are under-represented in the datasets widely used in the ML community today. To address this gap, we present WILDS, a curated benchmark of 10 datasets reflecting a diverse range of distribution shifts that naturally arise in real-world applications, such as shifts across hospitals for tumor identification; across camera traps for wildlife monitoring; and across time and location in satellite imaging and poverty mapping. On each dataset, we show that standard training yields substantially lower out-of-distribution than in-distribution performance. 
This gap remains even with models trained by existing methods for tackling distribution shifts, underscoring the need for new methods for training models that are more robust to the types of distribution shifts that arise in practice. To facilitate method development, we provide an open-source package that automates dataset loading, contains default model architectures and hyperparameters, and standardizes evaluations. The full paper, code, and leaderboards are available at https://wilds.stanford.edu.}\n}", "pdf": "http://proceedings.mlr.press/v139/koh21a/koh21a.pdf", "supp": "", "pdf_size": 3317760, "gs_citation": 1715, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11557463912604627857&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Stanford; Stanford; Stanford; Stanford; UC Berkeley; Stanford; Stanford; Stanford; Cornell; Stanford; Stanford; INRAE; USask; UTokyo; Recursion; Recursion; Caltech; Stanford; Stanford; Cornell+Microsoft Research; UC Berkeley; Stanford; Stanford", "aff_domain": "cs.stanford.edu;cs.stanford.edu; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ;cs.stanford.edu", "email": "cs.stanford.edu;cs.stanford.edu; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ;cs.stanford.edu", "github": "", "project": "https://wilds.stanford.edu", "author_num": 23, "oa": "https://proceedings.mlr.press/v139/koh21a.html", "aff_unique_index": "0;0;0;0;1;0;0;0;2;0;0;3;4;5;6;6;7;0;0;2+8;1;0;0", "aff_unique_norm": "Stanford University;University of California, Berkeley;Cornell University;INRAE;University of Saskatchewan;University of Tokyo;Recursion;California Institute of Technology;Microsoft", "aff_unique_dep": ";;;;;;;;Microsoft Research", "aff_unique_url": "https://www.stanford.edu;https://www.berkeley.edu;https://www.cornell.edu;https://www.inrae.fr;https://www.usask.ca;https://www.u-tokyo.ac.jp;;https://www.caltech.edu;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "Stanford;UC Berkeley;Cornell;INRAE;USask;UTokyo;;Caltech;MSR", "aff_campus_unique_index": "0;0;0;0;1;0;0;0;0;0;3;0;0;;1;0;0", "aff_campus_unique": "Stanford;Berkeley;;Pasadena", "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;0;1;2;3;0;0;0;0+0;0;0;0", "aff_country_unique": "United States;France;Canada;Japan;" }, { "title": "Wasserstein Distributional Normalization For Robust Distributional Certification of Noisy Labeled Data", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10301", "id": "10301", "proceeding": "http://proceedings.mlr.press/v139/park21a.html", "slides": "", "author_site": "Sung Woo Park, Junseok Kwon", "author": "Sung Woo Park; Junseok Kwon", "abstract": "We propose a novel Wasserstein distributional normalization method that can classify noisy labeled data accurately. Recently, noisy labels have been successfully handled based on small-loss criteria, but have not been clearly understood from the theoretical point of view. In this paper, we address this problem by adopting distributionally robust optimization (DRO). In particular, we present a theoretical investigation of the distributional relationship between uncertain and certain samples based on the small-loss criteria. Our method takes advantage of this relationship to exploit useful information from uncertain samples. To this end, we normalize uncertain samples into the robustly certified region by introducing the non-parametric Ornstein-Ulenbeck type of Wasserstein gradient flows called Wasserstein distributional normalization, which is cheap and fast to implement. 
We verify that network confidence and distributional certification are fundamentally correlated and show the concentration inequality when the network escapes from over-parameterization. Experimental results demonstrate that our non-parametric classification method outperforms other parametric baselines on the Clothing1M and CIFAR-10/100 datasets when the data have diverse noisy labels.", "bibtex": "@InProceedings{pmlr-v139-park21a,\n title = \t {Wasserstein Distributional Normalization For Robust Distributional Certification of Noisy Labeled Data},\n author = {Park, Sung Woo and Kwon, Junseok},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8381--8390},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/park21a/park21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/park21a.html},\n abstract = \t {We propose a novel Wasserstein distributional normalization method that can classify noisy labeled data accurately. Recently, noisy labels have been successfully handled based on small-loss criteria, but have not been clearly understood from the theoretical point of view. In this paper, we address this problem by adopting distributionally robust optimization (DRO). In particular, we present a theoretical investigation of the distributional relationship between uncertain and certain samples based on the small-loss criteria. Our method takes advantage of this relationship to exploit useful information from uncertain samples. To this end, we normalize uncertain samples into the robustly certified region by introducing the non-parametric Ornstein-Ulenbeck type of Wasserstein gradient flows called Wasserstein distributional normalization, which is cheap and fast to implement. We verify that network confidence and distributional certification are fundamentally correlated and show the concentration inequality when the network escapes from over-parameterization. 
Experimental results demonstrate that our non-parametric classification method outperforms other parametric baselines on the Clothing1M and CIFAR-10/100 datasets when the data have diverse noisy labels.}\n}", "pdf": "http://proceedings.mlr.press/v139/park21a/park21a.pdf", "supp": "", "pdf_size": 553466, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3664457085996877127&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "School of Computer Science and Engineering, Artificial Intelligence Graduate School, Chung-Ang University, Seoul, Korea; School of Computer Science and Engineering, Artificial Intelligence Graduate School, Chung-Ang University, Seoul, Korea", "aff_domain": "gmail.com;cau.ac.kr", "email": "gmail.com;cau.ac.kr", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/park21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Chung-Ang University", "aff_unique_dep": "School of Computer Science and Engineering", "aff_unique_url": "http://www.cau.ac.kr", "aff_unique_abbr": "CAU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Seoul", "aff_country_unique_index": "0;0", "aff_country_unique": "South Korea" }, { "title": "Watermarking Deep Neural Networks with Greedy Residuals", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10595", "id": "10595", "proceeding": "http://proceedings.mlr.press/v139/liu21x.html", "slides": "", "author_site": "Hanwen Liu, Zhenyu Weng, Yuesheng Zhu", "author": "Hanwen Liu; Zhenyu Weng; Yuesheng Zhu", "abstract": "Deep neural networks (DNNs) are considered as intellectual property of their corresponding owners and thus are in urgent need of ownership protection, due to the massive amount of time and resources invested in designing, tuning and training them. In this paper, we propose a novel watermark-based ownership protection method by using the residuals of important parameters. Different from other watermark-based ownership protection methods that rely on some specific neural network architectures and during verification require external data source, namely ownership indicators, our method does not explicitly use ownership indicators for verification to defeat various attacks against DNN watermarks. Specifically, we greedily select a few and important model parameters for embedding so that the impairment caused by the changed parameters can be reduced and the robustness against different attacks can be improved as the selected parameters can well preserve the model information. Also, without the external data sources for verification, the adversary can hardly cast doubts on ownership verification by forging counterfeit watermarks. 
The extensive experiments show that our method outperforms previous state-of-the-art methods in five tasks.", "bibtex": "@InProceedings{pmlr-v139-liu21x,\n title = \t {Watermarking Deep Neural Networks with Greedy Residuals},\n author = {Liu, Hanwen and Weng, Zhenyu and Zhu, Yuesheng},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6978--6988},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/liu21x/liu21x.pdf},\n url = \t {https://proceedings.mlr.press/v139/liu21x.html},\n abstract = \t {Deep neural networks (DNNs) are considered as intellectual property of their corresponding owners and thus are in urgent need of ownership protection, due to the massive amount of time and resources invested in designing, tuning and training them. In this paper, we propose a novel watermark-based ownership protection method by using the residuals of important parameters. Different from other watermark-based ownership protection methods that rely on some specific neural network architectures and during verification require external data source, namely ownership indicators, our method does not explicitly use ownership indicators for verification to defeat various attacks against DNN watermarks. Specifically, we greedily select a few and important model parameters for embedding so that the impairment caused by the changed parameters can be reduced and the robustness against different attacks can be improved as the selected parameters can well preserve the model information. Also, without the external data sources for verification, the adversary can hardly cast doubts on ownership verification by forging counterfeit watermarks. 
The extensive experiments show that our method outperforms previous state-of-the-art methods in five tasks.}\n}", "pdf": "http://proceedings.mlr.press/v139/liu21x/liu21x.pdf", "supp": "", "pdf_size": 8201935, "gs_citation": 66, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2111193400086387251&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "School of Electronic and Computer Engineering, Peking University; School of Electronic and Computer Engineering, Peking University; School of Electronic and Computer Engineering, Peking University", "aff_domain": "pku.edu.cn; ; ", "email": "pku.edu.cn; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/liu21x.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Peking University", "aff_unique_dep": "School of Electronic and Computer Engineering", "aff_unique_url": "http://www.pku.edu.cn", "aff_unique_abbr": "PKU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "China" }, { "title": "Weight-covariance alignment for adversarially robust neural networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9309", "id": "9309", "proceeding": "http://proceedings.mlr.press/v139/eustratiadis21a.html", "slides": "/media/icml-2021/Slides/9309.pdf", "author_site": "Panagiotis Eustratiadis, Henry Gouk, Da Li, Timothy Hospedales", "author": "Panagiotis Eustratiadis; Henry Gouk; Da Li; Timothy Hospedales", "abstract": "Stochastic Neural Networks (SNNs) that inject noise into their hidden layers have recently been shown to achieve strong robustness against adversarial attacks. However, existing SNNs are usually heuristically motivated, and often rely on adversarial training, which is computationally costly. We propose a new SNN that achieves state-of-the-art performance without relying on adversarial training, and enjoys solid theoretical justification. Specifically, while existing SNNs inject learned or hand-tuned isotropic noise, our SNN learns an anisotropic noise distribution to optimize a learning-theoretic bound on adversarial robustness. We evaluate our method on a number of popular benchmarks, show that it can be applied to different architectures, and that it provides robustness to a variety of white-box and black-box attacks, while being simple and fast to train compared to existing alternatives.", "bibtex": "@InProceedings{pmlr-v139-eustratiadis21a,\n title = \t {Weight-covariance alignment for adversarially robust neural networks},\n author = {Eustratiadis, Panagiotis and Gouk, Henry and Li, Da and Hospedales, Timothy},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3047--3056},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/eustratiadis21a/eustratiadis21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/eustratiadis21a.html},\n abstract = \t {Stochastic Neural Networks (SNNs) that inject noise into their hidden layers have recently been shown to achieve strong robustness against adversarial attacks. However, existing SNNs are usually heuristically motivated, and often rely on adversarial training, which is computationally costly. 
We propose a new SNN that achieves state-of-the-art performance without relying on adversarial training, and enjoys solid theoretical justification. Specifically, while existing SNNs inject learned or hand-tuned isotropic noise, our SNN learns an anisotropic noise distribution to optimize a learning-theoretic bound on adversarial robustness. We evaluate our method on a number of popular benchmarks, show that it can be applied to different architectures, and that it provides robustness to a variety of white-box and black-box attacks, while being simple and fast to train compared to existing alternatives.}\n}", "pdf": "http://proceedings.mlr.press/v139/eustratiadis21a/eustratiadis21a.pdf", "supp": "", "pdf_size": 3559528, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3465502784075773283&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "University of Edinburgh; University of Edinburgh; University of Edinburgh + Samsung AI Center, Cambridge; University of Edinburgh + Samsung AI Center, Cambridge", "aff_domain": "ed.ac.uk;ed.ac.uk;gmail.com;ed.ac.uk", "email": "ed.ac.uk;ed.ac.uk;gmail.com;ed.ac.uk", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/eustratiadis21a.html", "aff_unique_index": "0;0;0+1;0+1", "aff_unique_norm": "University of Edinburgh;Samsung", "aff_unique_dep": ";AI Center", "aff_unique_url": "https://www.ed.ac.uk;https://www.samsung.com/global/research-innovation/ai-research-centers/samsung-ai-center-cambridge/", "aff_unique_abbr": "Edinburgh;SAC", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Cambridge", "aff_country_unique_index": "0;0;0+0;0+0", "aff_country_unique": "United Kingdom" }, { "title": "Weisfeiler and Lehman Go Topological: Message Passing Simplicial Networks", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10197", "id": "10197", "proceeding": "http://proceedings.mlr.press/v139/bodnar21a.html", "slides": "", "author_site": "Cristian Bodnar, Fabrizio Frasca, Yuguang Wang, Nina Otter, Guido Montufar, Pietro Li\u00f3, Michael Bronstein", "author": "Cristian Bodnar; Fabrizio Frasca; Yuguang Wang; Nina Otter; Guido F Montufar; Pietro Li\u00f3; Michael Bronstein", "abstract": "The pairwise interaction paradigm of graph machine learning has predominantly governed the modelling of relational systems. However, graphs alone cannot capture the multi-level interactions present in many complex systems and the expressive power of such schemes was proven to be limited. To overcome these limitations, we propose Message Passing Simplicial Networks (MPSNs), a class of models that perform message passing on simplicial complexes (SCs). To theoretically analyse the expressivity of our model we introduce a Simplicial Weisfeiler-Lehman (SWL) colouring procedure for distinguishing non-isomorphic SCs. We relate the power of SWL to the problem of distinguishing non-isomorphic graphs and show that SWL and MPSNs are strictly more powerful than the WL test and not less powerful than the 3-WL test. We deepen the analysis by comparing our model with traditional graph neural networks (GNNs) with ReLU activations in terms of the number of linear regions of the functions they can represent. 
We empirically support our theoretical claims by showing that MPSNs can distinguish challenging strongly regular graphs for which GNNs fail and, when equipped with orientation equivariant layers, they can improve classification accuracy in oriented SCs compared to a GNN baseline.", "bibtex": "@InProceedings{pmlr-v139-bodnar21a,\n title = \t {Weisfeiler and Lehman Go Topological: Message Passing Simplicial Networks},\n author = {Bodnar, Cristian and Frasca, Fabrizio and Wang, Yuguang and Otter, Nina and Montufar, Guido F and Li{\\'o}, Pietro and Bronstein, Michael},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1026--1037},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/bodnar21a/bodnar21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/bodnar21a.html},\n abstract = \t {The pairwise interaction paradigm of graph machine learning has predominantly governed the modelling of relational systems. However, graphs alone cannot capture the multi-level interactions present in many complex systems and the expressive power of such schemes was proven to be limited. To overcome these limitations, we propose Message Passing Simplicial Networks (MPSNs), a class of models that perform message passing on simplicial complexes (SCs). To theoretically analyse the expressivity of our model we introduce a Simplicial Weisfeiler-Lehman (SWL) colouring procedure for distinguishing non-isomorphic SCs. We relate the power of SWL to the problem of distinguishing non-isomorphic graphs and show that SWL and MPSNs are strictly more powerful than the WL test and not less powerful than the 3-WL test. We deepen the analysis by comparing our model with traditional graph neural networks (GNNs) with ReLU activations in terms of the number of linear regions of the functions they can represent. 
We empirically support our theoretical claims by showing that MPSNs can distinguish challenging strongly regular graphs for which GNNs fail and, when equipped with orientation equivariant layers, they can improve classification accuracy in oriented SCs compared to a GNN baseline.}\n}", "pdf": "http://proceedings.mlr.press/v139/bodnar21a/bodnar21a.pdf", "supp": "", "pdf_size": 2049282, "gs_citation": 342, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8275189776192061574&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 15, "aff": "Department of Computer Science and Technology, University of Cambridge, UK+Twitter, UK+Department of Computing, Imperial College London, UK+Max Planck Institute for Mathematics in the Sciences, Leipzig, Germany+Institute of Natural Sciences and School of Mathematical Sciences, Shanghai Jiao Tong University, China+School of Mathematics and Statistics, University of New South Wales, Sydney, Australia+Department of Mathematics and Department of Statistics, University of California, Los Angeles, USA; Twitter, UK+Department of Computing, Imperial College London, UK+Max Planck Institute for Mathematics in the Sciences, Leipzig, Germany+Institute of Natural Sciences and School of Mathematical Sciences, Shanghai Jiao Tong University, China+School of Mathematics and Statistics, University of New South Wales, Sydney, Australia; Max Planck Institute for Mathematics in the Sciences, Leipzig, Germany+Institute of Natural Sciences and School of Mathematical Sciences, Shanghai Jiao Tong University, China+School of Mathematics and Statistics, University of New South Wales, Sydney, Australia; Department of Mathematics and Department of Statistics, University of California, Los Angeles, USA; Department of Mathematics and Department of Statistics, University of California, Los Angeles, USA; Department of Computer Science and Technology, University of Cambridge, UK; Department of Computing, Imperial College London, UK", "aff_domain": "cam.ac.uk;twitter.com;mis.mpg.de; ; ; ; ", "email": "cam.ac.uk;twitter.com;mis.mpg.de; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/bodnar21a.html", "aff_unique_index": "0+1+2+3+4+5+6;1+2+3+4+5;3+4+5;6;6;0;2", "aff_unique_norm": "University of Cambridge;Twitter;Imperial College London;Max Planck Institute for Mathematics in the Sciences;Shanghai Jiao Tong University;University of New South Wales;University of California, Los Angeles", "aff_unique_dep": "Department of Computer Science and Technology;;Department of Computing;Mathematics;Institute of Natural Sciences, School of Mathematical Sciences;School of Mathematics and Statistics;Department of Mathematics", "aff_unique_url": "https://www.cam.ac.uk;https://twitter.com;https://www.imperial.ac.uk;https://www.mis.mpg.de;https://www.sjtu.edu.cn;https://www.unsw.edu.au;https://www.ucla.edu", "aff_unique_abbr": "Cambridge;Twitter;Imperial;MPI MIS;SJTU;UNSW;UCLA", "aff_campus_unique_index": "0+2+3+4+5;2+3+4;3+4;5;5;0;2", "aff_campus_unique": "Cambridge;;London;Leipzig;Sydney;Los Angeles", "aff_country_unique_index": "0+0+0+1+2+3+4;0+0+1+2+3;1+2+3;4;4;0;0", "aff_country_unique": "United Kingdom;Germany;China;Australia;United States" }, { "title": "What Are Bayesian Neural Network Posteriors Really Like?", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10335", "id": "10335", "proceeding": "http://proceedings.mlr.press/v139/izmailov21a.html", "slides": "/media/icml-2021/Slides/10335.pdf", "author_site": "Pavel 
Izmailov, Sharad Vikram, Matthew Hoffman, Andrew Wilson", "author": "Pavel Izmailov; Sharad Vikram; Matthew D Hoffman; Andrew Gordon Gordon Wilson", "abstract": "The posterior over Bayesian neural network (BNN) parameters is extremely high-dimensional and non-convex. For computational reasons, researchers approximate this posterior using inexpensive mini-batch methods such as mean-field variational inference or stochastic-gradient Markov chain Monte Carlo (SGMCMC). To investigate foundational questions in Bayesian deep learning, we instead use full batch Hamiltonian Monte Carlo (HMC) on modern architectures. We show that (1) BNNs can achieve significant performance gains over standard training and deep ensembles; (2) a single long HMC chain can provide a comparable representation of the posterior to multiple shorter chains; (3) in contrast to recent studies, we find posterior tempering is not needed for near-optimal performance, with little evidence for a \u201ccold posterior\u201d effect, which we show is largely an artifact of data augmentation; (4) BMA performance is robust to the choice of prior scale, and relatively similar for diagonal Gaussian, mixture of Gaussian, and logistic priors; (5) Bayesian neural networks show surprisingly poor generalization under domain shift; (6) while cheaper alternatives such as deep ensembles and SGMCMC can provide good generalization, their predictive distributions are distinct from HMC. Notably, deep ensemble predictive distributions are similarly close to HMC as standard SGLD, and closer than standard variational inference.", "bibtex": "@InProceedings{pmlr-v139-izmailov21a,\n title = \t {What Are Bayesian Neural Network Posteriors Really Like?},\n author = {Izmailov, Pavel and Vikram, Sharad and Hoffman, Matthew D and Wilson, Andrew Gordon Gordon},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4629--4640},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/izmailov21a/izmailov21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/izmailov21a.html},\n abstract = \t {The posterior over Bayesian neural network (BNN) parameters is extremely high-dimensional and non-convex. For computational reasons, researchers approximate this posterior using inexpensive mini-batch methods such as mean-field variational inference or stochastic-gradient Markov chain Monte Carlo (SGMCMC). To investigate foundational questions in Bayesian deep learning, we instead use full batch Hamiltonian Monte Carlo (HMC) on modern architectures. 
We show that (1) BNNs can achieve significant performance gains over standard training and deep ensembles; (2) a single long HMC chain can provide a comparable representation of the posterior to multiple shorter chains; (3) in contrast to recent studies, we find posterior tempering is not needed for near-optimal performance, with little evidence for a \u201ccold posterior\u201d effect, which we show is largely an artifact of data augmentation; (4) BMA performance is robust to the choice of prior scale, and relatively similar for diagonal Gaussian, mixture of Gaussian, and logistic priors; (5) Bayesian neural networks show surprisingly poor generalization under domain shift; (6) while cheaper alternatives such as deep ensembles and SGMCMC can provide good generalization, their predictive distributions are distinct from HMC. Notably, deep ensemble predictive distributions are similarly close to HMC as standard SGLD, and closer than standard variational inference.}\n}", "pdf": "http://proceedings.mlr.press/v139/izmailov21a/izmailov21a.pdf", "supp": "", "pdf_size": 480328, "gs_citation": 515, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9889838705748674353&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "New York University; Google Research; Google Research; New York University", "aff_domain": "nyu.edu; ; ;cims.nyu.edu", "email": "nyu.edu; ; ;cims.nyu.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/izmailov21a.html", "aff_unique_index": "0;1;1;0", "aff_unique_norm": "New York University;Google", "aff_unique_dep": ";Google Research", "aff_unique_url": "https://www.nyu.edu;https://research.google", "aff_unique_abbr": "NYU;Google Research", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Mountain View", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "What Does Rotation Prediction Tell Us about Classifier Accuracy under Varying Testing Environments?", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8587", "id": "8587", "proceeding": "http://proceedings.mlr.press/v139/deng21a.html", "slides": "/media/icml-2021/Slides/8587.pdf", "author_site": "Weijian Deng, Stephen Gould, Liang Zheng", "author": "Weijian Deng; Stephen Gould; Liang Zheng", "abstract": "Understanding classifier decision under novel environments is central to the community, and a common practice is evaluating it on labeled test sets. However, in real-world testing, image annotations are difficult and expensive to obtain, especially when the test environment is changing. A natural question then arises: given a trained classifier, can we evaluate its accuracy on varying unlabeled test sets? In this work, we train semantic classification and rotation prediction in a multi-task way. On a series of datasets, we report an interesting finding, i.e., the semantic classification accuracy exhibits a strong linear relationship with the accuracy of the rotation prediction task (Pearson\u2019s Correlation r > 0.88). 
This finding allows us to utilize linear regression to estimate classifier performance from the accuracy of rotation prediction which can be obtained on the test set through the freely generated rotation labels.", "bibtex": "@InProceedings{pmlr-v139-deng21a,\n title = \t {What Does Rotation Prediction Tell Us about Classifier Accuracy under Varying Testing Environments?},\n author = {Deng, Weijian and Gould, Stephen and Zheng, Liang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2579--2589},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/deng21a/deng21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/deng21a.html},\n abstract = \t {Understanding classifier decision under novel environments is central to the community, and a common practice is evaluating it on labeled test sets. However, in real-world testing, image annotations are difficult and expensive to obtain, especially when the test environment is changing. A natural question then arises: given a trained classifier, can we evaluate its accuracy on varying unlabeled test sets? In this work, we train semantic classification and rotation prediction in a multi-task way. On a series of datasets, we report an interesting finding, i.e., the semantic classification accuracy exhibits a strong linear relationship with the accuracy of the rotation prediction task (Pearson\u2019s Correlation r > 0.88). This finding allows us to utilize linear regression to estimate classifier performance from the accuracy of rotation prediction which can be obtained on the test set through the freely generated rotation labels.}\n}", "pdf": "http://proceedings.mlr.press/v139/deng21a/deng21a.pdf", "supp": "", "pdf_size": 1892746, "gs_citation": 84, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10242632577794319039&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "College of Engineering and Computer Science, Australian National University, Canberra, ACT 0200, Australia; College of Engineering and Computer Science, Australian National University, Canberra, ACT 0200, Australia; College of Engineering and Computer Science, Australian National University, Canberra, ACT 0200, Australia", "aff_domain": "anu.edu.au; ; ", "email": "anu.edu.au; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/deng21a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Australian National University", "aff_unique_dep": "College of Engineering and Computer Science", "aff_unique_url": "https://www.anu.edu.au", "aff_unique_abbr": "ANU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Canberra", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Australia" }, { "title": "What Makes for End-to-End Object Detection?", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8867", "id": "8867", "proceeding": "http://proceedings.mlr.press/v139/sun21b.html", "slides": "", "author_site": "Peize Sun, Yi Jiang, Enze Xie, Wenqi Shao, Zehuan Yuan, Changhu Wang, Ping Luo", "author": "Peize Sun; Yi Jiang; Enze Xie; Wenqi Shao; Zehuan Yuan; Changhu Wang; Ping Luo", "abstract": "Object detection has recently achieved a breakthrough for removing the last one non-differentiable component in the pipeline, Non-Maximum Suppression (NMS), 
and building up an end-to-end system. However, what makes for its one-to-one prediction has not been well understood. In this paper, we first point out that one-to-one positive sample assignment is the key factor, while, one-to-many assignment in previous detectors causes redundant predictions in inference. Second, we surprisingly find that even training with one-to-one assignment, previous detectors still produce redundant predictions. We identify that classification cost in matching cost is the main ingredient: (1) previous detectors only consider location cost, (2) by additionally introducing classification cost, previous detectors immediately produce one-to-one prediction during inference. We introduce the concept of score gap to explore the effect of matching cost. Classification cost enlarges the score gap by choosing positive samples as those of highest score in the training iteration and reducing noisy positive samples brought by only location cost. Finally, we demonstrate the advantages of end-to-end object detection on crowded scenes.", "bibtex": "@InProceedings{pmlr-v139-sun21b,\n title = \t {What Makes for End-to-End Object Detection?},\n author = {Sun, Peize and Jiang, Yi and Xie, Enze and Shao, Wenqi and Yuan, Zehuan and Wang, Changhu and Luo, Ping},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9934--9944},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/sun21b/sun21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/sun21b.html},\n abstract = \t {Object detection has recently achieved a breakthrough for removing the last one non-differentiable component in the pipeline, Non-Maximum Suppression (NMS), and building up an end-to-end system. However, what makes for its one-to-one prediction has not been well understood. In this paper, we first point out that one-to-one positive sample assignment is the key factor, while, one-to-many assignment in previous detectors causes redundant predictions in inference. Second, we surprisingly find that even training with one-to-one assignment, previous detectors still produce redundant predictions. We identify that classification cost in matching cost is the main ingredient: (1) previous detectors only consider location cost, (2) by additionally introducing classification cost, previous detectors immediately produce one-to-one prediction during inference. We introduce the concept of score gap to explore the effect of matching cost. Classification cost enlarges the score gap by choosing positive samples as those of highest score in the training iteration and reducing noisy positive samples brought by only location cost. 
Finally, we demonstrate the advantages of end-to-end object detection on crowded scenes.}\n}", "pdf": "http://proceedings.mlr.press/v139/sun21b/sun21b.pdf", "supp": "", "pdf_size": 2767229, "gs_citation": 117, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17182921757850029040&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, The University of Hong Kong; AI Lab, ByteDance; Department of Computer Science, The University of Hong Kong; Department of Electronic Engineering, The Chinese University of Hong Kong; AI Lab, ByteDance; AI Lab, ByteDance; Department of Computer Science, The University of Hong Kong", "aff_domain": "connect.hku.hk; ; ; ; ; ; ", "email": "connect.hku.hk; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v139/sun21b.html", "aff_unique_index": "0;1;0;2;1;1;0", "aff_unique_norm": "University of Hong Kong;ByteDance;Chinese University of Hong Kong", "aff_unique_dep": "Department of Computer Science;AI Lab;Department of Electronic Engineering", "aff_unique_url": "https://www.hku.hk;https://www.bytedance.com;https://www.cuhk.edu.hk", "aff_unique_abbr": "HKU;ByteDance;CUHK", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Hong Kong SAR;", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "China" }, { "title": "What does LIME really see in images?", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9699", "id": "9699", "proceeding": "http://proceedings.mlr.press/v139/garreau21a.html", "slides": "", "author_site": "Damien Garreau, Dina Mardaoui", "author": "Damien Garreau; Dina Mardaoui", "abstract": "The performance of modern algorithms on certain computer vision tasks such as object recognition is now close to that of humans. This success was achieved at the price of complicated architectures depending on millions of parameters and it has become quite challenging to understand how particular predictions are made. Interpretability methods propose to give us this understanding. In this paper, we study LIME, perhaps one of the most popular. On the theoretical side, we show that when the number of generated examples is large, LIME explanations are concentrated around a limit explanation for which we give an explicit expression. We further this study for elementary shape detectors and linear models. As a consequence of this analysis, we uncover a connection between LIME and integrated gradients, another explanation method. More precisely, the LIME explanations are similar to the sum of integrated gradients over the superpixels used in the preprocessing step of LIME.", "bibtex": "@InProceedings{pmlr-v139-garreau21a,\n title = \t {What does LIME really see in images?},\n author = {Garreau, Damien and Mardaoui, Dina},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3620--3629},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/garreau21a/garreau21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/garreau21a.html},\n abstract = \t {The performance of modern algorithms on certain computer vision tasks such as object recognition is now close to that of humans. 
This success was achieved at the price of complicated architectures depending on millions of parameters and it has become quite challenging to understand how particular predictions are made. Interpretability methods propose to give us this understanding. In this paper, we study LIME, perhaps one of the most popular. On the theoretical side, we show that when the number of generated examples is large, LIME explanations are concentrated around a limit explanation for which we give an explicit expression. We further this study for elementary shape detectors and linear models. As a consequence of this analysis, we uncover a connection between LIME and integrated gradients, another explanation method. More precisely, the LIME explanations are similar to the sum of integrated gradients over the superpixels used in the preprocessing step of LIME.}\n}", "pdf": "http://proceedings.mlr.press/v139/garreau21a/garreau21a.pdf", "supp": "", "pdf_size": 6837597, "gs_citation": 57, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8275490801192083940&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Universit\u00e9 C\u00f4te d\u2019Azur, Inria, CNRS, LJAD, France; Polytech Nice", "aff_domain": "univ-cotedazur.fr; ", "email": "univ-cotedazur.fr; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/garreau21a.html", "aff_unique_index": "0;1", "aff_unique_norm": "Universit\u00e9 C\u00f4te d\u2019Azur;Polytech Nice", "aff_unique_dep": ";", "aff_unique_url": "https://www.univ-cotedazur.fr;https://www.polytech.unice.fr", "aff_unique_abbr": "UniCoast;", "aff_campus_unique_index": "1", "aff_campus_unique": ";Nice", "aff_country_unique_index": "0;0", "aff_country_unique": "France" }, { "title": "What\u2019s in the Box? Exploring the Inner Life of Neural Networks with Robust Rules", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8539", "id": "8539", "proceeding": "http://proceedings.mlr.press/v139/fischer21b.html", "slides": "", "author_site": "Jonas Fischer, Anna Olah, Jilles Vreeken", "author": "Jonas Fischer; Anna Olah; Jilles Vreeken", "abstract": "We propose a novel method for exploring how neurons within neural networks interact. In particular, we consider activation values of a network for given data, and propose to mine noise-robust rules of the form X {\\rightarrow} Y , where X and Y are sets of neurons in different layers. We identify the best set of rules by the Minimum Description Length Principle as the rules that together are most descriptive of the activation data. To learn good rule sets in practice, we propose the unsupervised ExplaiNN algorithm. Extensive evaluation shows that the patterns it discovers give clear insight in how networks perceive the world: they identify shared, respectively class-specific traits, compositionality within the network, as well as locality in convolutional layers. Moreover, these patterns are not only easily interpretable, but also supercharge prototyping as they identify which groups of neurons to consider in unison.", "bibtex": "@InProceedings{pmlr-v139-fischer21b,\n title = \t {What\u2019s in the Box?
Exploring the Inner Life of Neural Networks with Robust Rules},\n author = {Fischer, Jonas and Olah, Anna and Vreeken, Jilles},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3352--3362},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/fischer21b/fischer21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/fischer21b.html},\n abstract = \t {We propose a novel method for exploring how neurons within neural networks interact. In particular, we consider activation values of a network for given data, and propose to mine noise-robust rules of the form X {\\rightarrow} Y , where X and Y are sets of neurons in different layers. We identify the best set of rules by the Minimum Description Length Principle as the rules that together are most descriptive of the activation data. To learn good rule sets in practice, we propose the unsupervised ExplaiNN algorithm. Extensive evaluation shows that the patterns it discovers give clear insight in how networks perceive the world: they identify shared, respectively class-specific traits, compositionality within the network, as well as locality in convolutional layers. Moreover, these patterns are not only easily interpretable, but also supercharge prototyping as they identify which groups of neurons to consider in unison.}\n}", "pdf": "http://proceedings.mlr.press/v139/fischer21b/fischer21b.pdf", "supp": "", "pdf_size": 2321288, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10213675563986212565&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Max Planck Institute for Informatics, Germany+1; Max Planck Institute for Informatics, Germany+1; CISPA Helmholtz Center for Information Security, Germany+2", "aff_domain": "mpi-inf.mpg.de; ; ", "email": "mpi-inf.mpg.de; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/fischer21b.html", "aff_unique_index": "0;0;2", "aff_unique_norm": "Max Planck Institute for Informatics;;CISPA Helmholtz Center for Information Security", "aff_unique_dep": ";;", "aff_unique_url": "https://mpi-inf.mpg.de;;https://www.cispa.de/", "aff_unique_abbr": "MPII;;CISPA", "aff_campus_unique_index": ";;", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Germany;" }, { "title": "When All We Need is a Piece of the Pie: A Generic Framework for Optimizing Two-way Partial AUC", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8431", "id": "8431", "proceeding": "http://proceedings.mlr.press/v139/yang21k.html", "slides": "/media/icml-2021/Slides/8431.pdf", "author_site": "Zhiyong Yang, Qianqian Xu, Shilong Bao, Yuan He, Xiaochun Cao, Qingming Huang", "author": "Zhiyong Yang; Qianqian Xu; Shilong Bao; Yuan He; Xiaochun Cao; Qingming Huang", "abstract": "The Area Under the ROC Curve (AUC) is a crucial metric for machine learning, which evaluates the average performance over all possible True Positive Rates (TPRs) and False Positive Rates (FPRs). Based on the knowledge that a skillful classifier should simultaneously embrace a high TPR and a low FPR, we turn to study a more general variant called Two-way Partial AUC (TPAUC), where only the region with $\\mathsf{TPR} \\ge \\alpha, \\mathsf{FPR} \\le \\beta$ is included in the area. 
Moreover, a recent work shows that the TPAUC is essentially inconsistent with the existing Partial AUC metrics where only the FPR range is restricted, opening a new problem to seek solutions to leverage high TPAUC. Motivated by this, we present the first trial in this paper to optimize this new metric. The critical challenge along this course lies in the difficulty of performing gradient-based optimization with end-to-end stochastic training, even with a proper choice of surrogate loss. To address this issue, we propose a generic framework to construct surrogate optimization problems, which supports efficient end-to-end training with deep-learning. Moreover, our theoretical analyses show that: 1) the objective function of the surrogate problems will achieve an upper bound of the original problem under mild conditions, and 2) optimizing the surrogate problems leads to good generalization performance in terms of TPAUC with a high probability. Finally, empirical studies over several benchmark datasets speak to the efficacy of our framework.", "bibtex": "@InProceedings{pmlr-v139-yang21k,\n title = \t {When All We Need is a Piece of the Pie: A Generic Framework for Optimizing Two-way Partial AUC},\n author = {Yang, Zhiyong and Xu, Qianqian and Bao, Shilong and He, Yuan and Cao, Xiaochun and Huang, Qingming},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11820--11829},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yang21k/yang21k.pdf},\n url = \t {https://proceedings.mlr.press/v139/yang21k.html},\n abstract = \t {The Area Under the ROC Curve (AUC) is a crucial metric for machine learning, which evaluates the average performance over all possible True Positive Rates (TPRs) and False Positive Rates (FPRs). Based on the knowledge that a skillful classifier should simultaneously embrace a high TPR and a low FPR, we turn to study a more general variant called Two-way Partial AUC (TPAUC), where only the region with $\\mathsf{TPR} \\ge \\alpha, \\mathsf{FPR} \\le \\beta$ is included in the area. Moreover, a recent work shows that the TPAUC is essentially inconsistent with the existing Partial AUC metrics where only the FPR range is restricted, opening a new problem to seek solutions to leverage high TPAUC. Motivated by this, we present the first trial in this paper to optimize this new metric. The critical challenge along this course lies in the difficulty of performing gradient-based optimization with end-to-end stochastic training, even with a proper choice of surrogate loss. To address this issue, we propose a generic framework to construct surrogate optimization problems, which supports efficient end-to-end training with deep-learning. Moreover, our theoretical analyses show that: 1) the objective function of the surrogate problems will achieve an upper bound of the original problem under mild conditions, and 2) optimizing the surrogate problems leads to good generalization performance in terms of TPAUC with a high probability. 
Finally, empirical studies over several benchmark datasets speak to the efficacy of our framework.}\n}", "pdf": "http://proceedings.mlr.press/v139/yang21k/yang21k.pdf", "supp": "", "pdf_size": 1849388, "gs_citation": 34, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7430336993381252540&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": ";;;;;", "aff_domain": ";;;;;", "email": ";;;;;", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/yang21k.html" }, { "title": "When Does Data Augmentation Help With Membership Inference Attacks?", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9601", "id": "9601", "proceeding": "http://proceedings.mlr.press/v139/kaya21a.html", "slides": "/media/icml-2021/Slides/9601.pdf", "author_site": "Yigitcan Kaya, Tudor Dumitras", "author": "Yigitcan Kaya; Tudor Dumitras", "abstract": "Deep learning models often raise privacy concerns as they leak information about their training data. This leakage enables membership inference attacks (MIA) that can identify whether a data point was in a model\u2019s training set. Research shows that some \u2019data augmentation\u2019 mechanisms may reduce the risk by combatting a key factor increasing the leakage, overfitting. While many mechanisms exist, their effectiveness against MIAs and privacy properties have not been studied systematically. Employing two recent MIAs, we explore the lower bound on the risk in the absence of formal upper bounds. First, we evaluate 7 mechanisms and differential privacy, on three image classification tasks. We find that applying augmentation to increase the model\u2019s utility does not mitigate the risk and protection comes with a utility penalty. Further, we also investigate why popular label smoothing mechanism consistently amplifies the risk. Finally, we propose \u2019loss-rank-correlation\u2019 (LRC) metric to assess how similar the effects of different mechanisms are. This, for example, reveals the similarity of applying high-intensity augmentation against MIAs to simply reducing the training time. Our findings emphasize the utility-privacy trade-off and provide practical guidelines on using augmentation to manage the trade-off.", "bibtex": "@InProceedings{pmlr-v139-kaya21a,\n title = \t {When Does Data Augmentation Help With Membership Inference Attacks?},\n author = {Kaya, Yigitcan and Dumitras, Tudor},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {5345--5355},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/kaya21a/kaya21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/kaya21a.html},\n abstract = \t {Deep learning models often raise privacy concerns as they leak information about their training data. This leakage enables membership inference attacks (MIA) that can identify whether a data point was in a model\u2019s training set. Research shows that some \u2019data augmentation\u2019 mechanisms may reduce the risk by combatting a key factor increasing the leakage, overfitting. While many mechanisms exist, their effectiveness against MIAs and privacy properties have not been studied systematically. Employing two recent MIAs, we explore the lower bound on the risk in the absence of formal upper bounds. 
First, we evaluate 7 mechanisms and differential privacy, on three image classification tasks. We find that applying augmentation to increase the model\u2019s utility does not mitigate the risk and protection comes with a utility penalty. Further, we also investigate why popular label smoothing mechanism consistently amplifies the risk. Finally, we propose \u2019loss-rank-correlation\u2019 (LRC) metric to assess how similar the effects of different mechanisms are. This, for example, reveals the similarity of applying high-intensity augmentation against MIAs to simply reducing the training time. Our findings emphasize the utility-privacy trade-off and provide practical guidelines on using augmentation to manage the trade-off.}\n}", "pdf": "http://proceedings.mlr.press/v139/kaya21a/kaya21a.pdf", "supp": "", "pdf_size": 1358955, "gs_citation": 87, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8095733758395252368&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "University of Maryland, Maryland, USA; University of Maryland, Maryland, USA", "aff_domain": "umiacs.umd.edu; ", "email": "umiacs.umd.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v139/kaya21a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Maryland", "aff_unique_dep": "", "aff_unique_url": "https://www/umd.edu", "aff_unique_abbr": "UMD", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Maryland", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Which transformer architecture fits my data? A vocabulary bottleneck in self-attention", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8763", "id": "8763", "proceeding": "http://proceedings.mlr.press/v139/wies21a.html", "slides": "/media/icml-2021/Slides/8763.pdf", "author_site": "Noam Wies, Yoav Levine, Daniel Jannai, Amnon Shashua", "author": "Noam Wies; Yoav Levine; Daniel Jannai; Amnon Shashua", "abstract": "After their successful debut in natural language processing, Transformer architectures are now becoming the de-facto standard in many domains. An obstacle for their deployment over new modalities is the architectural configuration: the optimal depth-to-width ratio has been shown to dramatically vary across data types (i.e., 10x larger over images than over language). We theoretically predict the existence of an embedding rank bottleneck that limits the contribution of self-attention width to the Transformer expressivity. We thus directly tie the input vocabulary size and rank to the optimal depth-to-width ratio, since a small vocabulary size or rank dictates an added advantage of depth over width. We empirically demonstrate the existence of this bottleneck and its implications on the depth-to-width interplay of Transformer architectures, linking the architecture variability across domains to the often glossed-over usage of different vocabulary sizes or embedding ranks in different domains. As an additional benefit, our rank bottlenecking framework allows us to identify size redundancies of 25%-50% in leading NLP models such as ALBERT and T5.", "bibtex": "@InProceedings{pmlr-v139-wies21a,\n title = \t {Which transformer architecture fits my data? 
A vocabulary bottleneck in self-attention},\n author = {Wies, Noam and Levine, Yoav and Jannai, Daniel and Shashua, Amnon},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {11170--11181},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wies21a/wies21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/wies21a.html},\n abstract = \t {After their successful debut in natural language processing, Transformer architectures are now becoming the de-facto standard in many domains. An obstacle for their deployment over new modalities is the architectural configuration: the optimal depth-to-width ratio has been shown to dramatically vary across data types (i.e., 10x larger over images than over language). We theoretically predict the existence of an embedding rank bottleneck that limits the contribution of self-attention width to the Transformer expressivity. We thus directly tie the input vocabulary size and rank to the optimal depth-to-width ratio, since a small vocabulary size or rank dictates an added advantage of depth over width. We empirically demonstrate the existence of this bottleneck and its implications on the depth-to-width interplay of Transformer architectures, linking the architecture variability across domains to the often glossed-over usage of different vocabulary sizes or embedding ranks in different domains. As an additional benefit, our rank bottlenecking framework allows us to identify size redundancies of 25%-50% in leading NLP models such as ALBERT and T5.}\n}", "pdf": "http://proceedings.mlr.press/v139/wies21a/wies21a.pdf", "supp": "", "pdf_size": 577948, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4283136974185313572&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "The Hebrew University of Jerusalem; The Hebrew University of Jerusalem; The Hebrew University of Jerusalem; The Hebrew University of Jerusalem", "aff_domain": "cs.huji.ac.il; ; ; ", "email": "cs.huji.ac.il; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/wies21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Hebrew University of Jerusalem", "aff_unique_dep": "", "aff_unique_url": "https://www.huji.ac.il", "aff_unique_abbr": "HUJI", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Israel" }, { "title": "Whitening and Second Order Optimization Both Make Information in the Dataset Unusable During Training, and Can Reduce or Prevent Generalization", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8717", "id": "8717", "proceeding": "http://proceedings.mlr.press/v139/wadia21a.html", "slides": "/media/icml-2021/Slides/8717.pdf", "author_site": "Neha Wadia, Daniel Duckworth, Samuel Schoenholz, Ethan Dyer, Jascha Sohl-Dickstein", "author": "Neha Wadia; Daniel Duckworth; Samuel S Schoenholz; Ethan Dyer; Jascha Sohl-Dickstein", "abstract": "Machine learning is predicated on the concept of generalization: a model achieving low error on a sufficiently large training set should also perform well on novel samples from the same distribution. We show that both data whitening and second order optimization can harm or entirely prevent generalization. 
In general, model training harnesses information contained in the sample-sample second moment matrix of a dataset. For a general class of models, namely models with a fully connected first layer, we prove that the information contained in this matrix is the only information which can be used to generalize. Models trained using whitened data, or with certain second order optimization schemes, have less access to this information, resulting in reduced or nonexistent generalization ability. We experimentally verify these predictions for several architectures, and further demonstrate that generalization continues to be harmed even when theoretical requirements are relaxed. However, we also show experimentally that regularized second order optimization can provide a practical tradeoff, where training is accelerated but less information is lost, and generalization can in some circumstances even improve.", "bibtex": "@InProceedings{pmlr-v139-wadia21a,\n title = \t {Whitening and Second Order Optimization Both Make Information in the Dataset Unusable During Training, and Can Reduce or Prevent Generalization},\n author = {Wadia, Neha and Duckworth, Daniel and Schoenholz, Samuel S and Dyer, Ethan and Sohl-Dickstein, Jascha},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10617--10629},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wadia21a/wadia21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/wadia21a.html},\n abstract = \t {Machine learning is predicated on the concept of generalization: a model achieving low error on a sufficiently large training set should also perform well on novel samples from the same distribution. We show that both data whitening and second order optimization can harm or entirely prevent generalization. In general, model training harnesses information contained in the sample-sample second moment matrix of a dataset. For a general class of models, namely models with a fully connected first layer, we prove that the information contained in this matrix is the only information which can be used to generalize. Models trained using whitened data, or with certain second order optimization schemes, have less access to this information, resulting in reduced or nonexistent generalization ability. We experimentally verify these predictions for several architectures, and further demonstrate that generalization continues to be harmed even when theoretical requirements are relaxed. 
However, we also show experimentally that regularized second order optimization can provide a practical tradeoff, where training is accelerated but less information is lost, and generalization can in some circumstances even improve.}\n}", "pdf": "http://proceedings.mlr.press/v139/wadia21a/wadia21a.pdf", "supp": "", "pdf_size": 1879799, "gs_citation": 19, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8106385671670624557&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "University of California, Berkeley; Google Brain; Google Brain; Google Brain; Google Brain", "aff_domain": "berkeley.edu; ; ; ; ", "email": "berkeley.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/wadia21a.html", "aff_unique_index": "0;1;1;1;1", "aff_unique_norm": "University of California, Berkeley;Google", "aff_unique_dep": ";Google Brain", "aff_unique_url": "https://www.berkeley.edu;https://brain.google.com", "aff_unique_abbr": "UC Berkeley;Google Brain", "aff_campus_unique_index": "0;1;1;1;1", "aff_campus_unique": "Berkeley;Mountain View", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Whitening for Self-Supervised Representation Learning", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10241", "id": "10241", "proceeding": "http://proceedings.mlr.press/v139/ermolov21a.html", "slides": "", "author_site": "Aleksandr Ermolov, Aliaksandr Siarohin, Enver Sangineto, Nicu Sebe", "author": "Aleksandr Ermolov; Aliaksandr Siarohin; Enver Sangineto; Nicu Sebe", "abstract": "Most of the current self-supervised representation learning (SSL) methods are based on the contrastive loss and the instance-discrimination task, where augmented versions of the same image instance (\"positives\") are contrasted with instances extracted from other images (\"negatives\"). For the learning to be effective, many negatives should be compared with a positive pair, which is computationally demanding. In this paper, we propose a different direction and a new loss function for SSL, which is based on the whitening of the latent-space features. The whitening operation has a \"scattering\" effect on the batch samples, avoiding degenerate solutions where all the sample representations collapse to a single point. Our solution does not require asymmetric networks and it is conceptually simple. Moreover, since negatives are not needed, we can extract multiple positive pairs from the same image instance. 
The source code of the method and of all the experiments is available at: https://github.com/htdt/self-supervised.", "bibtex": "@InProceedings{pmlr-v139-ermolov21a,\n title = \t {Whitening for Self-Supervised Representation Learning},\n author = {Ermolov, Aleksandr and Siarohin, Aliaksandr and Sangineto, Enver and Sebe, Nicu},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {3015--3024},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ermolov21a/ermolov21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ermolov21a.html},\n abstract = \t {Most of the current self-supervised representation learning (SSL) methods are based on the contrastive loss and the instance-discrimination task, where augmented versions of the same image instance (\"positives\") are contrasted with instances extracted from other images (\"negatives\"). For the learning to be effective, many negatives should be compared with a positive pair, which is computationally demanding. In this paper, we propose a different direction and a new loss function for SSL, which is based on the whitening of the latent-space features. The whitening operation has a \"scattering\" effect on the batch samples, avoiding degenerate solutions where all the sample representations collapse to a single point. Our solution does not require asymmetric networks and it is conceptually simple. Moreover, since negatives are not needed, we can extract multiple positive pairs from the same image instance. The source code of the method and of all the experiments is available at: https://github.com/htdt/self-supervised.}\n}", "pdf": "http://proceedings.mlr.press/v139/ermolov21a/ermolov21a.pdf", "supp": "", "pdf_size": 1532957, "gs_citation": 378, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14222215050873553089&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Information Engineering and Computer Science (DISI), University of Trento, Italy; Department of Information Engineering and Computer Science (DISI), University of Trento, Italy; Department of Information Engineering and Computer Science (DISI), University of Trento, Italy; Department of Information Engineering and Computer Science (DISI), University of Trento, Italy", "aff_domain": "unitn.it; ; ; ", "email": "unitn.it; ; ; ", "github": "https://github.com/htdt/self-supervised", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/ermolov21a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Trento", "aff_unique_dep": "Department of Information Engineering and Computer Science (DISI)", "aff_unique_url": "https://www.unitn.it", "aff_unique_abbr": "UniTN", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Italy" }, { "title": "Whittle Networks: A Deep Likelihood Model for Time Series", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10295", "id": "10295", "proceeding": "http://proceedings.mlr.press/v139/yu21c.html", "slides": "/media/icml-2021/Slides/10295.pdf", "author_site": "Zhongjie Yu, Fabrizio Ventola, Kristian Kersting", "author": "Zhongjie Yu; Fabrizio G Ventola; Kristian Kersting", "abstract": "While probabilistic circuits have been extensively explored for 
tabular data, less attention has been paid to time series. Here, the goal is to estimate joint densities among the entire time series and, in turn, determining, for instance, conditional independence relations between them. To this end, we propose the first probabilistic circuits (PCs) approach for modeling the joint distribution of multivariate time series, called Whittle sum-product networks (WSPNs). WSPNs leverage the Whittle approximation, casting the likelihood in the frequency domain, and place a complex-valued sum-product network, the most prominent PC, over the frequencies. The conditional independence relations among the time series can then be determined efficiently in the spectral domain. Moreover, WSPNs can naturally be placed into the deep neural learning stack for time series, resulting in Whittle Networks, opening the likelihood toolbox for training deep neural models and inspecting their behaviour. Our experiments show that Whittle Networks can indeed capture complex dependencies between time series and provide a useful measure of uncertainty for neural networks.", "bibtex": "@InProceedings{pmlr-v139-yu21c,\n title = \t {Whittle Networks: A Deep Likelihood Model for Time Series},\n author = {Yu, Zhongjie and Ventola, Fabrizio G and Kersting, Kristian},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12177--12186},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/yu21c/yu21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/yu21c.html},\n abstract = \t {While probabilistic circuits have been extensively explored for tabular data, less attention has been paid to time series. Here, the goal is to estimate joint densities among the entire time series and, in turn, determining, for instance, conditional independence relations between them. To this end, we propose the first probabilistic circuits (PCs) approach for modeling the joint distribution of multivariate time series, called Whittle sum-product networks (WSPNs). WSPNs leverage the Whittle approximation, casting the likelihood in the frequency domain, and place a complex-valued sum-product network, the most prominent PC, over the frequencies. The conditional independence relations among the time series can then be determined efficiently in the spectral domain. Moreover, WSPNs can naturally be placed into the deep neural learning stack for time series, resulting in Whittle Networks, opening the likelihood toolbox for training deep neural models and inspecting their behaviour. 
Our experiments show that Whittle Networks can indeed capture complex dependencies between time series and provide a useful measure of uncertainty for neural networks.}\n}", "pdf": "http://proceedings.mlr.press/v139/yu21c/yu21c.pdf", "supp": "", "pdf_size": 2273987, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=795783047782065575&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Department of Computer Science, TU Darmstadt, Darmstadt, Germany+Centre for Cognitive Science, TU Darmstadt, and Hessian Center for AI (hessian.AI); Department of Computer Science, TU Darmstadt, Darmstadt, Germany+Centre for Cognitive Science, TU Darmstadt, and Hessian Center for AI (hessian.AI); Department of Computer Science, TU Darmstadt, Darmstadt, Germany+Centre for Cognitive Science, TU Darmstadt, and Hessian Center for AI (hessian.AI)", "aff_domain": "cs.tu-darmstadt.de; ; ", "email": "cs.tu-darmstadt.de; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/yu21c.html", "aff_unique_index": "0+1;0+1;0+1", "aff_unique_norm": "TU Darmstadt;Technische Universit\u00e4t Darmstadt", "aff_unique_dep": "Department of Computer Science;Centre for Cognitive Science", "aff_unique_url": "https://www.tu-darmstadt.de;https://www.tu-darmstadt.de", "aff_unique_abbr": "TUD;TU Darmstadt", "aff_campus_unique_index": "0+0;0+0;0+0", "aff_campus_unique": "Darmstadt", "aff_country_unique_index": "0+0;0+0;0+0", "aff_country_unique": "Germany" }, { "title": "Winograd Algorithm for AdderNet", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8909", "id": "8909", "proceeding": "http://proceedings.mlr.press/v139/li21c.html", "slides": "/media/icml-2021/Slides/8909.pdf", "author_site": "Wenshuo Li, Hanting Chen, Mingqiang Huang, Xinghao Chen, Chunjing Xu, Yunhe Wang", "author": "Wenshuo Li; Hanting Chen; Mingqiang Huang; Xinghao Chen; Chunjing Xu; Yunhe Wang", "abstract": "Adder neural network (AdderNet) is a new kind of deep model that replaces the original massive multiplications in convolutions by additions while preserving the high performance. Since the hardware complexity of additions is much lower than that of multiplications, the overall energy consumption is thus reduced significantly. To further optimize the hardware overhead of using AdderNet, this paper studies the winograd algorithm, which is a widely used fast algorithm for accelerating convolution and saving the computational costs. Unfortunately, the conventional Winograd algorithm cannot be directly applied to AdderNets since the distributive law in multiplication is not valid for the l1-norm. Therefore, we replace the element-wise multiplication in the Winograd equation by additions and then develop a new set of transform matrixes that can enhance the representation ability of output features to maintain the performance. Moreover, we propose the l2-to-l1 training strategy to mitigate the negative impacts caused by formal inconsistency. 
Experimental results on both FPGA and benchmarks show that the new method can further reduce the energy consumption without affecting the accuracy of the original AdderNet.", "bibtex": "@InProceedings{pmlr-v139-li21c,\n title = \t {Winograd Algorithm for AdderNet},\n author = {Li, Wenshuo and Chen, Hanting and Huang, Mingqiang and Chen, Xinghao and Xu, Chunjing and Wang, Yunhe},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {6307--6315},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/li21c/li21c.pdf},\n url = \t {https://proceedings.mlr.press/v139/li21c.html},\n abstract = \t {Adder neural network (AdderNet) is a new kind of deep model that replaces the original massive multiplications in convolutions by additions while preserving the high performance. Since the hardware complexity of additions is much lower than that of multiplications, the overall energy consumption is thus reduced significantly. To further optimize the hardware overhead of using AdderNet, this paper studies the winograd algorithm, which is a widely used fast algorithm for accelerating convolution and saving the computational costs. Unfortunately, the conventional Winograd algorithm cannot be directly applied to AdderNets since the distributive law in multiplication is not valid for the l1-norm. Therefore, we replace the element-wise multiplication in the Winograd equation by additions and then develop a new set of transform matrixes that can enhance the representation ability of output features to maintain the performance. Moreover, we propose the l2-to-l1 training strategy to mitigate the negative impacts caused by formal inconsistency. 
Experimental results on both FPGA and benchmarks show that the new method can further reduce the energy consumption without affecting the accuracy of the original AdderNet.}\n}", "pdf": "http://proceedings.mlr.press/v139/li21c/li21c.pdf", "supp": "", "pdf_size": 840439, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3622798303887552169&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Noah\u2019s Ark Lab, Huawei Technologies; Noah\u2019s Ark Lab, Huawei Technologies + Peking University; Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences; Noah\u2019s Ark Lab, Huawei Technologies; Noah\u2019s Ark Lab, Huawei Technologies; Noah\u2019s Ark Lab, Huawei Technologies", "aff_domain": "huawei.com; ; ; ; ;huawei.com", "email": "huawei.com; ; ; ; ;huawei.com", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/li21c.html", "aff_unique_index": "0;0+1;2;0;0;0", "aff_unique_norm": "Huawei;Peking University;Chinese Academy of Sciences", "aff_unique_dep": "Noah\u2019s Ark Lab;;Shenzhen Institutes of Advanced Technology", "aff_unique_url": "https://www.huawei.com;http://www.pku.edu.cn;http://www.siat.cas.cn", "aff_unique_abbr": "Huawei;Peking U;SIAT", "aff_campus_unique_index": ";1", "aff_campus_unique": ";Shenzhen", "aff_country_unique_index": "0;0+0;0;0;0;0", "aff_country_unique": "China" }, { "title": "World Model as a Graph: Learning Latent Landmarks for Planning", "status": "Oral", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10063", "id": "10063", "proceeding": "http://proceedings.mlr.press/v139/zhang21x.html", "slides": "/media/icml-2021/Slides/10063.pdf", "author_site": "Lunjun Zhang, Ge Yang, Bradly Stadie", "author": "Lunjun Zhang; Ge Yang; Bradly C Stadie", "abstract": "Planning, the ability to analyze the structure of a problem in the large and decompose it into interrelated subproblems, is a hallmark of human intelligence. While deep reinforcement learning (RL) has shown great promise for solving relatively straightforward control tasks, it remains an open problem how to best incorporate planning into existing deep RL paradigms to handle increasingly complex environments. One prominent framework, Model-Based RL, learns a world model and plans using step-by-step virtual rollouts. This type of world model quickly diverges from reality when the planning horizon increases, thus struggling at long-horizon planning. How can we learn world models that endow agents with the ability to do temporally extended reasoning? In this work, we propose to learn graph-structured world models composed of sparse, multi-step transitions. We devise a novel algorithm to learn latent landmarks that are scattered (in terms of reachability) across the goal space as the nodes on the graph. In this same graph, the edges are the reachability estimates distilled from Q-functions. On a variety of high-dimensional continuous control tasks ranging from robotic manipulation to navigation, we demonstrate that our method, named L3P, significantly outperforms prior work, and is oftentimes the only method capable of leveraging both the robustness of model-free RL and generalization of graph-search algorithms. 
We believe our work is an important step towards scalable planning in reinforcement learning.", "bibtex": "@InProceedings{pmlr-v139-zhang21x,\n title = \t {World Model as a Graph: Learning Latent Landmarks for Planning},\n author = {Zhang, Lunjun and Yang, Ge and Stadie, Bradly C},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12611--12620},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21x/zhang21x.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21x.html},\n abstract = \t {Planning, the ability to analyze the structure of a problem in the large and decompose it into interrelated subproblems, is a hallmark of human intelligence. While deep reinforcement learning (RL) has shown great promise for solving relatively straightforward control tasks, it remains an open problem how to best incorporate planning into existing deep RL paradigms to handle increasingly complex environments. One prominent framework, Model-Based RL, learns a world model and plans using step-by-step virtual rollouts. This type of world model quickly diverges from reality when the planning horizon increases, thus struggling at long-horizon planning. How can we learn world models that endow agents with the ability to do temporally extended reasoning? In this work, we propose to learn graph-structured world models composed of sparse, multi-step transitions. We devise a novel algorithm to learn latent landmarks that are scattered (in terms of reachability) across the goal space as the nodes on the graph. In this same graph, the edges are the reachability estimates distilled from Q-functions. On a variety of high-dimensional continuous control tasks ranging from robotic manipulation to navigation, we demonstrate that our method, named L3P, significantly outperforms prior work, and is oftentimes the only method capable of leveraging both the robustness of model-free RL and generalization of graph-search algorithms. 
We believe our work is an important step towards scalable planning in reinforcement learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21x/zhang21x.pdf", "supp": "", "pdf_size": 3572444, "gs_citation": 89, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11617385762396360333&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "University of Toronto + Vector Institute; MIT; Toyota Technological Institute at Chicago", "aff_domain": "cs.toronto.edu; ; ", "email": "cs.toronto.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/zhang21x.html", "aff_unique_index": "0+1;2;3", "aff_unique_norm": "University of Toronto;Vector Institute;Massachusetts Institute of Technology;Toyota Technological Institute at Chicago", "aff_unique_dep": ";;;", "aff_unique_url": "https://www.utoronto.ca;https://vectorinstitute.ai/;https://web.mit.edu;https://www.tti-chicago.org", "aff_unique_abbr": "U of T;Vector Institute;MIT;TTI Chicago", "aff_campus_unique_index": ";1", "aff_campus_unique": ";Chicago", "aff_country_unique_index": "0+0;1;1", "aff_country_unique": "Canada;United States" }, { "title": "XOR-CD: Linearly Convergent Constrained Structure Generation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9143", "id": "9143", "proceeding": "http://proceedings.mlr.press/v139/ding21a.html", "slides": "", "author_site": "Fan Ding, Jianzhu Ma, Jinbo Xu, Yexiang Xue", "author": "Fan Ding; Jianzhu Ma; Jinbo Xu; Yexiang Xue", "abstract": "We propose XOR-Contrastive Divergence learning (XOR-CD), a provable approach for constrained structure generation, which remains difficult for state-of-the-art neural network and constraint reasoning approaches. XOR-CD harnesses XOR-Sampling to generate samples from the model distribution in CD learning and is guaranteed to generate valid structures. In addition, XOR-CD has a linear convergence rate towards the global maximum of the likelihood function within a vanishing constant in learning exponential family models. Constraint satisfaction enabled by XOR-CD also boosts its learning performance. Our real-world experiments on data-driven experimental design, dispatching route generation, and sequence-based protein homology detection demonstrate the superior performance of XOR-CD compared to baseline approaches in generating valid structures as well as capturing the inductive bias in the training set.", "bibtex": "@InProceedings{pmlr-v139-ding21a,\n title = \t {XOR-CD: Linearly Convergent Constrained Structure Generation},\n author = {Ding, Fan and Ma, Jianzhu and Xu, Jinbo and Xue, Yexiang},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {2728--2738},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ding21a/ding21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ding21a.html},\n abstract = \t {We propose XOR-Contrastive Divergence learning (XOR-CD), a provable approach for constrained structure generation, which remains difficult for state-of-the-art neural network and constraint reasoning approaches. XOR-CD harnesses XOR-Sampling to generate samples from the model distribution in CD learning and is guaranteed to generate valid structures. 
In addition, XOR-CD has a linear convergence rate towards the global maximum of the likelihood function within a vanishing constant in learning exponential family models. Constraint satisfaction enabled by XOR-CD also boosts its learning performance. Our real-world experiments on data-driven experimental design, dispatching route generation, and sequence-based protein homology detection demonstrate the superior performance of XOR-CD compared to baseline approaches in generating valid structures as well as capturing the inductive bias in the training set.}\n}", "pdf": "http://proceedings.mlr.press/v139/ding21a/ding21a.pdf", "supp": "", "pdf_size": 880763, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2279388089886972456&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, Purdue University, West Lafayette, USA; Institute for Artificial Intelligence, Peking University, Beijing, China; Toyota Technological Institute at Chicago, Illinois, USA; Department of Computer Science, Purdue University, West Lafayette, USA", "aff_domain": "purdue.edu; ; ;purdue.edu", "email": "purdue.edu; ; ;purdue.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/ding21a.html", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "Purdue University;Peking University;Toyota Technological Institute at Chicago", "aff_unique_dep": "Department of Computer Science;Institute for Artificial Intelligence;", "aff_unique_url": "https://www.purdue.edu;http://www.pku.edu.cn;https://www.tti-chicago.org", "aff_unique_abbr": "Purdue;PKU;TTI Chicago", "aff_campus_unique_index": "0;1;2;0", "aff_campus_unique": "West Lafayette;Beijing;Chicago", "aff_country_unique_index": "0;1;0;0", "aff_country_unique": "United States;China" }, { "title": "You Only Sample (Almost) Once: Linear Cost Self-Attention Via Bernoulli Sampling", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9755", "id": "9755", "proceeding": "http://proceedings.mlr.press/v139/zeng21a.html", "slides": "", "author_site": "Zhanpeng Zeng, Yunyang Xiong, Sathya Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh", "author": "Zhanpeng Zeng; Yunyang Xiong; Sathya Ravi; Shailesh Acharya; Glenn M Fung; Vikas Singh", "abstract": "Transformer-based models are widely used in natural language processing (NLP). Central to the transformer model is the self-attention mechanism, which captures the interactions of token pairs in the input sequences and depends quadratically on the sequence length. Training such models on longer sequences is expensive. In this paper, we show that a Bernoulli sampling attention mechanism based on Locality Sensitive Hashing (LSH), decreases the quadratic complexity of such models to linear. We bypass the quadratic cost by considering self-attention as a sum of individual tokens associated with Bernoulli random variables that can, in principle, be sampled at once by a single hash (although in practice, this number may be a small constant). This leads to an efficient sampling scheme to estimate self-attention which relies on specific modifications of LSH (to enable deployment on GPU architectures). We evaluate our algorithm on the GLUE benchmark with standard 512 sequence length where we see favorable performance relative to a standard pretrained Transformer. 
On the Long Range Arena (LRA) benchmark, for evaluating performance on long sequences, our method achieves results consistent with softmax self-attention but with sizable speed-ups and memory savings and often outperforms other efficient self-attention methods. Our code is available at https://github.com/mlpen/YOSO.", "bibtex": "@InProceedings{pmlr-v139-zeng21a,\n title = \t {You Only Sample (Almost) Once: Linear Cost Self-Attention Via Bernoulli Sampling},\n author = {Zeng, Zhanpeng and Xiong, Yunyang and Ravi, Sathya and Acharya, Shailesh and Fung, Glenn M and Singh, Vikas},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12321--12332},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zeng21a/zeng21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/zeng21a.html},\n abstract = \t {Transformer-based models are widely used in natural language processing (NLP). Central to the transformer model is the self-attention mechanism, which captures the interactions of token pairs in the input sequences and depends quadratically on the sequence length. Training such models on longer sequences is expensive. In this paper, we show that a Bernoulli sampling attention mechanism based on Locality Sensitive Hashing (LSH), decreases the quadratic complexity of such models to linear. We bypass the quadratic cost by considering self-attention as a sum of individual tokens associated with Bernoulli random variables that can, in principle, be sampled at once by a single hash (although in practice, this number may be a small constant). This leads to an efficient sampling scheme to estimate self-attention which relies on specific modifications of LSH (to enable deployment on GPU architectures). We evaluate our algorithm on the GLUE benchmark with standard 512 sequence length where we see favorable performance relative to a standard pretrained Transformer. On the Long Range Arena (LRA) benchmark, for evaluating performance on long sequences, our method achieves results consistent with softmax self-attention but with sizable speed-ups and memory savings and often outperforms other efficient self-attention methods. 
Our code is available at https://github.com/mlpen/YOSO.}\n}", "pdf": "http://proceedings.mlr.press/v139/zeng21a/zeng21a.pdf", "supp": "", "pdf_size": 1877465, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11877607783928250360&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "University of Wisconsin, Madison, USA; University of Wisconsin, Madison, USA; University of Illinois, Chicago, USA; American Family Insurance, Madison, USA; American Family Insurance, Madison, USA; University of Wisconsin, Madison, USA", "aff_domain": "wisc.edu; ; ; ; ; ", "email": "wisc.edu; ; ; ; ; ", "github": "https://github.com/mlpen/YOSO", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/zeng21a.html", "aff_unique_index": "0;0;1;2;2;0", "aff_unique_norm": "University of Wisconsin-Madison;University of Illinois at Chicago;American Family Insurance", "aff_unique_dep": ";;", "aff_unique_url": "https://www.wisc.edu;https://www.uic.edu;https://www.amfam.com", "aff_unique_abbr": "UW-Madison;UIC;", "aff_campus_unique_index": "0;0;1;0;0;0", "aff_campus_unique": "Madison;Chicago", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Z-GCNETs: Time Zigzags at Graph Convolutional Networks for Time Series Forecasting", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9391", "id": "9391", "proceeding": "http://proceedings.mlr.press/v139/chen21o.html", "slides": "", "author_site": "Yuzhou Chen, Ignacio Segovia Dominguez, Yulia R Gel", "author": "Yuzhou Chen; Ignacio Segovia; Yulia R. Gel", "abstract": "There recently has been a surge of interest in developing a new class of deep learning (DL) architectures that integrate an explicit time dimension as a fundamental building block of learning and representation mechanisms. In turn, many recent results show that topological descriptors of the observed data, encoding information on the shape of the dataset in a topological space at different scales, that is, persistent homology of the data, may contain important complementary information, improving both performance and robustness of DL. As convergence of these two emerging ideas, we propose to enhance DL architectures with the most salient time-conditioned topological information of the data and introduce the concept of zigzag persistence into time-aware graph convolutional networks (GCNs). Zigzag persistence provides a systematic and mathematically rigorous framework to track the most important topological features of the observed data that tend to manifest themselves over time. To integrate the extracted time-conditioned topological descriptors into DL, we develop a new topological summary, zigzag persistence image, and derive its theoretical stability guarantees. We validate the new GCNs with a time-aware zigzag topological layer (Z-GCNETs), in application to traffic forecasting and Ethereum blockchain price prediction. 
Our results indicate that Z-GCNET outperforms 13 state-of-the-art methods on 4 time series datasets.", "bibtex": "@InProceedings{pmlr-v139-chen21o,\n title = \t {Z-GCNETs: Time Zigzags at Graph Convolutional Networks for Time Series Forecasting},\n author = {Chen, Yuzhou and Segovia, Ignacio and Gel, Yulia R.},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {1684--1694},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/chen21o/chen21o.pdf},\n url = \t {https://proceedings.mlr.press/v139/chen21o.html},\n abstract = \t {There recently has been a surge of interest in developing a new class of deep learning (DL) architectures that integrate an explicit time dimension as a fundamental building block of learning and representation mechanisms. In turn, many recent results show that topological descriptors of the observed data, encoding information on the shape of the dataset in a topological space at different scales, that is, persistent homology of the data, may contain important complementary information, improving both performance and robustness of DL. As convergence of these two emerging ideas, we propose to enhance DL architectures with the most salient time-conditioned topological information of the data and introduce the concept of zigzag persistence into time-aware graph convolutional networks (GCNs). Zigzag persistence provides a systematic and mathematically rigorous framework to track the most important topological features of the observed data that tend to manifest themselves over time. To integrate the extracted time-conditioned topological descriptors into DL, we develop a new topological summary, zigzag persistence image, and derive its theoretical stability guarantees. We validate the new GCNs with a time-aware zigzag topological layer (Z-GCNETs), in application to traffic forecasting and Ethereum blockchain price prediction. 
Our results indicate that Z-GCNET outperforms 13 state-of-the-art methods on 4 time series datasets.}\n}", "pdf": "http://proceedings.mlr.press/v139/chen21o/chen21o.pdf", "supp": "", "pdf_size": 932092, "gs_citation": 183, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7480163184753342890&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Statistical Science, Southern Methodist University, TX, USA+Energy Storage & Distributed Resources Division, Lawrence Berkeley National Laboratory, CA, USA; Department of Mathematical Sciences, University of Texas at Dallas, TX, USA+NASA Jet Propulsion Laboratory, CA, USA; Department of Mathematical Sciences, University of Texas at Dallas, TX, USA", "aff_domain": "smu.edu;utdallas.edu;utdallas.edu", "email": "smu.edu;utdallas.edu;utdallas.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v139/chen21o.html", "aff_unique_index": "0+1;2+3;2", "aff_unique_norm": "Southern Methodist University;Lawrence Berkeley National Laboratory;University of Texas at Dallas;NASA Jet Propulsion Laboratory", "aff_unique_dep": "Department of Statistical Science;Energy Storage & Distributed Resources Division;Department of Mathematical Sciences;", "aff_unique_url": "https://www.smu.edu;https://www.lbl.gov;https://www.utdallas.edu;https://www.jpl.nasa.gov", "aff_unique_abbr": "SMU;LBL;UT Dallas;JPL", "aff_campus_unique_index": "0+1;0+2;0", "aff_campus_unique": "Dallas;Berkeley;Pasadena", "aff_country_unique_index": "0+0;0+0;0", "aff_country_unique": "United States" }, { "title": "Zero-Shot Knowledge Distillation from a Decision-Based Black-Box Model", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10257", "id": "10257", "proceeding": "http://proceedings.mlr.press/v139/wang21a.html", "slides": "", "author": "Zi Wang", "abstract": "Knowledge distillation (KD) is a successful approach for deep neural network acceleration, with which a compact network (student) is trained by mimicking the softmax output of a pre-trained high-capacity network (teacher). In tradition, KD usually relies on access to the training samples and the parameters of the white-box teacher to acquire the transferred knowledge. However, these prerequisites are not always realistic due to storage costs or privacy issues in real-world applications. Here we propose the concept of decision-based black-box (DB3) knowledge distillation, with which the student is trained by distilling the knowledge from a black-box teacher (parameters are not accessible) that only returns classes rather than softmax outputs. We start with the scenario when the training set is accessible. We represent a sample\u2019s robustness against other classes by computing its distances to the teacher\u2019s decision boundaries and use it to construct the soft label for each training sample. After that, the student can be trained via standard KD. We then extend this approach to a more challenging scenario in which even accessing the training data is not feasible. We propose to generate pseudo samples that are distinguished by the decision boundaries of the DB3 teacher to the largest extent and construct soft labels for these samples, which are used as the transfer set. 
We evaluate our approaches on various benchmark networks and datasets and experiment results demonstrate their effectiveness.", "bibtex": "@InProceedings{pmlr-v139-wang21a,\n title = \t {Zero-Shot Knowledge Distillation from a Decision-Based Black-Box Model},\n author = {Wang, Zi},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {10675--10685},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/wang21a/wang21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/wang21a.html},\n abstract = \t {Knowledge distillation (KD) is a successful approach for deep neural network acceleration, with which a compact network (student) is trained by mimicking the softmax output of a pre-trained high-capacity network (teacher). In tradition, KD usually relies on access to the training samples and the parameters of the white-box teacher to acquire the transferred knowledge. However, these prerequisites are not always realistic due to storage costs or privacy issues in real-world applications. Here we propose the concept of decision-based black-box (DB3) knowledge distillation, with which the student is trained by distilling the knowledge from a black-box teacher (parameters are not accessible) that only returns classes rather than softmax outputs. We start with the scenario when the training set is accessible. We represent a sample\u2019s robustness against other classes by computing its distances to the teacher\u2019s decision boundaries and use it to construct the soft label for each training sample. After that, the student can be trained via standard KD. We then extend this approach to a more challenging scenario in which even accessing the training data is not feasible. We propose to generate pseudo samples that are distinguished by the decision boundaries of the DB3 teacher to the largest extent and construct soft labels for these samples, which are used as the transfer set. 
We evaluate our approaches on various benchmark networks and datasets and experiment results demonstrate their effectiveness.}\n}", "pdf": "http://proceedings.mlr.press/v139/wang21a/wang21a.pdf", "supp": "", "pdf_size": 3702626, "gs_citation": 55, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7908835679548457764&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of Electrical Engineering and Computer Science, The University of Tennessee, Knoxville, TN, USA", "aff_domain": "vols.utk.edu", "email": "vols.utk.edu", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v139/wang21a.html", "aff_unique_index": "0", "aff_unique_norm": "University of Tennessee", "aff_unique_dep": "Department of Electrical Engineering and Computer Science", "aff_unique_url": "https://www.utk.edu", "aff_unique_abbr": "UT Knoxville", "aff_campus_unique_index": "0", "aff_campus_unique": "Knoxville", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "title": "Zero-Shot Text-to-Image Generation", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9429", "id": "9429", "proceeding": "http://proceedings.mlr.press/v139/ramesh21a.html", "slides": "", "author_site": "Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, Ilya Sutskever", "author": "Aditya Ramesh; Mikhail Pavlov; Gabriel Goh; Scott Gray; Chelsea Voss; Alec Radford; Mark Chen; Ilya Sutskever", "abstract": "Text-to-image generation has traditionally focused on finding better modeling assumptions for training on a fixed dataset. These assumptions might involve complex architectures, auxiliary losses, or side information such as object part labels or segmentation masks supplied during training. We describe a simple approach for this task based on a transformer that autoregressively models the text and image tokens as a single stream of data. With sufficient data and scale, our approach is competitive with previous domain-specific models when evaluated in a zero-shot fashion.", "bibtex": "@InProceedings{pmlr-v139-ramesh21a,\n title = \t {Zero-Shot Text-to-Image Generation},\n author = {Ramesh, Aditya and Pavlov, Mikhail and Goh, Gabriel and Gray, Scott and Voss, Chelsea and Radford, Alec and Chen, Mark and Sutskever, Ilya},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {8821--8831},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/ramesh21a/ramesh21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/ramesh21a.html},\n abstract = \t {Text-to-image generation has traditionally focused on finding better modeling assumptions for training on a fixed dataset. These assumptions might involve complex architectures, auxiliary losses, or side information such as object part labels or segmentation masks supplied during training. We describe a simple approach for this task based on a transformer that autoregressively models the text and image tokens as a single stream of data. 
With sufficient data and scale, our approach is competitive with previous domain-specific models when evaluated in a zero-shot fashion.}\n}", "pdf": "http://proceedings.mlr.press/v139/ramesh21a/ramesh21a.pdf", "supp": "", "pdf_size": 6046235, "gs_citation": 6333, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18428055834209091582&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "OpenAI; OpenAI; OpenAI; OpenAI; OpenAI; OpenAI; OpenAI; OpenAI", "aff_domain": "adityaramesh.com; ; ; ; ; ; ; ", "email": "adityaramesh.com; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v139/ramesh21a.html", "aff_unique_index": "0;0;0;0;0;0;0;0", "aff_unique_norm": "OpenAI", "aff_unique_dep": "", "aff_unique_url": "https://openai.com", "aff_unique_abbr": "OpenAI", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Zeroth-Order Non-Convex Learning via Hierarchical Dual Averaging", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/10737", "id": "10737", "proceeding": "http://proceedings.mlr.press/v139/heliou21a.html", "slides": "/media/icml-2021/Slides/10737.pdf", "author_site": "Am\u00e9lie H\u00e9liou, Matthieu Martin, Panayotis Mertikopoulos, Thibaud J Rahier", "author": "Am\u00e9lie H\u00e9liou; Matthieu Martin; Panayotis Mertikopoulos; Thibaud Rahier", "abstract": "We propose a hierarchical version of dual averaging for zeroth-order online non-convex optimization {\u2013} i.e., learning processes where, at each stage, the optimizer is facing an unknown non-convex loss function and only receives the incurred loss as feedback. The proposed class of policies relies on the construction of an online model that aggregates loss information as it arrives, and it consists of two principal components: (a) a regularizer adapted to the Fisher information metric (as opposed to the metric norm of the ambient space); and (b) a principled exploration of the problem\u2019s state space based on an adapted hierarchical schedule. This construction enables sharper control of the model\u2019s bias and variance, and allows us to derive tight bounds for both the learner\u2019s static and dynamic regret {\u2013} i.e., the regret incurred against the best dynamic policy in hindsight over the horizon of play.", "bibtex": "@InProceedings{pmlr-v139-heliou21a,\n title = \t {Zeroth-Order Non-Convex Learning via Hierarchical Dual Averaging},\n author = {H{\\'e}liou, Am{\\'e}lie and Martin, Matthieu and Mertikopoulos, Panayotis and Rahier, Thibaud},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {4192--4202},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/heliou21a/heliou21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/heliou21a.html},\n abstract = \t {We propose a hierarchical version of dual averaging for zeroth-order online non-convex optimization {\u2013} i.e., learning processes where, at each stage, the optimizer is facing an unknown non-convex loss function and only receives the incurred loss as feedback. 
The proposed class of policies relies on the construction of an online model that aggregates loss information as it arrives, and it consists of two principal components: (a) a regularizer adapted to the Fisher information metric (as opposed to the metric norm of the ambient space); and (b) a principled exploration of the problem\u2019s state space based on an adapted hierarchical schedule. This construction enables sharper control of the model\u2019s bias and variance, and allows us to derive tight bounds for both the learner\u2019s static and dynamic regret {\u2013} i.e., the regret incurred against the best dynamic policy in hindsight over the horizon of play.}\n}", "pdf": "http://proceedings.mlr.press/v139/heliou21a/heliou21a.pdf", "supp": "", "pdf_size": 411226, "gs_citation": 16, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5813472955967220826&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Criteo AI Lab; Criteo AI Lab; Univ. Grenoble Alpes, CNRS, Inria, Grenoble INP, LIG, 38000 Grenoble, France; Criteo AI Lab", "aff_domain": "criteo.com;criteo.com;imag.fr;criteo.com", "email": "criteo.com;criteo.com;imag.fr;criteo.com", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/heliou21a.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "Criteo;Universite Grenoble Alpes", "aff_unique_dep": "Criteo AI Lab;", "aff_unique_url": "https://www.criteo.com;https://www.univ-grenoble-alpes.fr", "aff_unique_abbr": "Criteo;UGA", "aff_campus_unique_index": "1", "aff_campus_unique": ";Grenoble", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "France" }, { "title": "Zoo-Tuning: Adaptive Transfer from A Zoo of Models", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9887", "id": "9887", "proceeding": "http://proceedings.mlr.press/v139/shu21b.html", "slides": "/media/icml-2021/Slides/9887.pdf", "author_site": "Yang Shu, Zhi Kou, Zhangjie Cao, Jianmin Wang, Mingsheng Long", "author": "Yang Shu; Zhi Kou; Zhangjie Cao; Jianmin Wang; Mingsheng Long", "abstract": "With the development of deep networks on various large-scale datasets, a large zoo of pretrained models are available. When transferring from a model zoo, applying classic single-model-based transfer learning methods to each source model suffers from high computational cost and cannot fully utilize the rich knowledge in the zoo. We propose \\emph{Zoo-Tuning} to address these challenges, which learns to adaptively transfer the parameters of pretrained models to the target task. With the learnable channel alignment layer and adaptive aggregation layer, Zoo-Tuning \\emph{adaptively aggregates channel aligned pretrained parameters to derive the target model}, which simultaneously promotes knowledge transfer and adapts source models to downstream tasks. The adaptive aggregation substantially reduces the computation cost at both training and inference. We further propose lite Zoo-Tuning with the temporal ensemble of batch average gating values to reduce the storage cost at the inference time. We evaluate our approach on a variety of tasks, including reinforcement learning, image classification, and facial landmark detection. 
Experiment results demonstrate that the proposed adaptive transfer learning approach can more effectively and efficiently transfer knowledge from a zoo of models.", "bibtex": "@InProceedings{pmlr-v139-shu21b,\n title = \t {Zoo-Tuning: Adaptive Transfer from A Zoo of Models},\n author = {Shu, Yang and Kou, Zhi and Cao, Zhangjie and Wang, Jianmin and Long, Mingsheng},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {9626--9637},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/shu21b/shu21b.pdf},\n url = \t {https://proceedings.mlr.press/v139/shu21b.html},\n abstract = \t {With the development of deep networks on various large-scale datasets, a large zoo of pretrained models are available. When transferring from a model zoo, applying classic single-model-based transfer learning methods to each source model suffers from high computational cost and cannot fully utilize the rich knowledge in the zoo. We propose \\emph{Zoo-Tuning} to address these challenges, which learns to adaptively transfer the parameters of pretrained models to the target task. With the learnable channel alignment layer and adaptive aggregation layer, Zoo-Tuning \\emph{adaptively aggregates channel aligned pretrained parameters to derive the target model}, which simultaneously promotes knowledge transfer and adapts source models to downstream tasks. The adaptive aggregation substantially reduces the computation cost at both training and inference. We further propose lite Zoo-Tuning with the temporal ensemble of batch average gating values to reduce the storage cost at the inference time. We evaluate our approach on a variety of tasks, including reinforcement learning, image classification, and facial landmark detection. 
Experiment results demonstrate that the proposed adaptive transfer learning approach can more effectively and efficiently transfer knowledge from a zoo of models.}\n}", "pdf": "http://proceedings.mlr.press/v139/shu21b/shu21b.pdf", "supp": "", "pdf_size": 1961660, "gs_citation": 50, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4486277702250768083&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "School of Software, BNRist, Tsinghua University; School of Software, BNRist, Tsinghua University; School of Software, BNRist, Tsinghua University; School of Software, BNRist, Tsinghua University; School of Software, BNRist, Tsinghua University", "aff_domain": "mails.tsinghua.edu.cn; ; ; ;tsinghua.edu.cn", "email": "mails.tsinghua.edu.cn; ; ; ;tsinghua.edu.cn", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v139/shu21b.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Tsinghua University", "aff_unique_dep": "School of Software", "aff_unique_url": "https://www.tsinghua.edu.cn", "aff_unique_abbr": "THU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "China" }, { "title": "f-Domain Adversarial Learning: Theory and Algorithms", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/9515", "id": "9515", "proceeding": "http://proceedings.mlr.press/v139/acuna21a.html", "slides": "/media/icml-2021/Slides/9515.pdf", "author_site": "David Acuna, Guojun Zhang, Marc Law, Sanja Fidler", "author": "David Acuna; Guojun Zhang; Marc T. Law; Sanja Fidler", "abstract": "Unsupervised domain adaptation is used in many machine learning applications where, during training, a model has access to unlabeled data in the target domain, and a related labeled dataset. In this paper, we introduce a novel and general domain-adversarial framework. Specifically, we derive a novel generalization bound for domain adaptation that exploits a new measure of discrepancy between distributions based on a variational characterization of f-divergences. It recovers the theoretical results from Ben-David et al. (2010a) as a special case and supports divergences used in practice. Based on this bound, we derive a new algorithmic framework that introduces a key correction in the original adversarial training method of Ganin et al. (2016). We show that many regularizers and ad-hoc objectives introduced over the last years in this framework are then not required to achieve performance comparable to (if not better than) state-of-the-art domain-adversarial methods. Experimental analysis conducted on real-world natural language and computer vision datasets show that our framework outperforms existing baselines, and obtains the best results for f-divergences that were not considered previously in domain-adversarial learning.", "bibtex": "@InProceedings{pmlr-v139-acuna21a,\n title = \t {f-Domain Adversarial Learning: Theory and Algorithms},\n author = {Acuna, David and Zhang, Guojun and Law, Marc T. 
and Fidler, Sanja},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {66--75},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/acuna21a/acuna21a.pdf},\n url = \t {https://proceedings.mlr.press/v139/acuna21a.html},\n abstract = \t {Unsupervised domain adaptation is used in many machine learning applications where, during training, a model has access to unlabeled data in the target domain, and a related labeled dataset. In this paper, we introduce a novel and general domain-adversarial framework. Specifically, we derive a novel generalization bound for domain adaptation that exploits a new measure of discrepancy between distributions based on a variational characterization of f-divergences. It recovers the theoretical results from Ben-David et al. (2010a) as a special case and supports divergences used in practice. Based on this bound, we derive a new algorithmic framework that introduces a key correction in the original adversarial training method of Ganin et al. (2016). We show that many regularizers and ad-hoc objectives introduced over the last years in this framework are then not required to achieve performance comparable to (if not better than) state-of-the-art domain-adversarial methods. Experimental analysis conducted on real-world natural language and computer vision datasets show that our framework outperforms existing baselines, and obtains the best results for f-divergences that were not considered previously in domain-adversarial learning.}\n}", "pdf": "http://proceedings.mlr.press/v139/acuna21a/acuna21a.pdf", "supp": "", "pdf_size": 835553, "gs_citation": 88, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4384670192135245187&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "NVIDIA+University of Toronto+Vector Institute; University of Waterloo+University of Toronto+Vector Institute; NVIDIA; NVIDIA+University of Toronto+Vector Institute", "aff_domain": "cs.toronto.edu;nvidia.com; ; ", "email": "cs.toronto.edu;nvidia.com; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v139/acuna21a.html", "aff_unique_index": "0+1+2;3+1+2;0;0+1+2", "aff_unique_norm": "NVIDIA;University of Toronto;Vector Institute;University of Waterloo", "aff_unique_dep": "NVIDIA Corporation;;;", "aff_unique_url": "https://www.nvidia.com;https://www.utoronto.ca;https://vectorinstitute.ai/;https://uwaterloo.ca", "aff_unique_abbr": "NVIDIA;U of T;Vector Institute;UW", "aff_campus_unique_index": ";;", "aff_campus_unique": "", "aff_country_unique_index": "0+1+1;1+1+1;0;0+1+1", "aff_country_unique": "United States;Canada" }, { "title": "iDARTS: Differentiable Architecture Search with Stochastic Implicit Gradients", "status": "Spotlight", "track": "main", "site": "https://icml.cc/virtual/2021/poster/8823", "id": "8823", "proceeding": "http://proceedings.mlr.press/v139/zhang21s.html", "slides": "", "author_site": "Miao Zhang, Steven Su, Shirui Pan, Xiaojun Chang, Ehsan Abbasnejad, Reza Haffari", "author": "Miao Zhang; Steven W. Su; Shirui Pan; Xiaojun Chang; Ehsan M Abbasnejad; Reza Haffari", "abstract": "Differentiable ARchiTecture Search(DARTS) has recently become the mainstream in the neural architecture search (NAS) due to its efficiency and simplicity. 
With a gradient-based bi-level optimization, DARTS alternately optimizes the inner model weights and the outer architecture parameter in a weight-sharing supernet. A key challenge to the scalability and quality of the learned architectures is the need for differentiating through the inner-loop optimisation. While much has been discussed about several potentially fatal factors in DARTS, the architecture gradient, a.k.a. hypergradient, has received less attention. In this paper, we tackle the hypergradient computation in DARTS based on the implicit function theorem, making it only depends on the obtained solution to the inner-loop optimization and agnostic to the optimization path. To further reduce the computational requirements, we formulate a stochastic hypergradient approximation for differentiable NAS, and theoretically show that the architecture optimization with the proposed method is expected to converge to a stationary point. Comprehensive experiments on two NAS benchmark search spaces and the common NAS search space verify the effectiveness of our proposed method. It leads to architectures outperforming, with large margins, those learned by the baseline methods.", "bibtex": "@InProceedings{pmlr-v139-zhang21s,\n title = \t {iDARTS: Differentiable Architecture Search with Stochastic Implicit Gradients},\n author = {Zhang, Miao and Su, Steven W. and Pan, Shirui and Chang, Xiaojun and Abbasnejad, Ehsan M and Haffari, Reza},\n booktitle = \t {Proceedings of the 38th International Conference on Machine Learning},\n pages = \t {12557--12566},\n year = \t {2021},\n editor = \t {Meila, Marina and Zhang, Tong},\n volume = \t {139},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {18--24 Jul},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v139/zhang21s/zhang21s.pdf},\n url = \t {https://proceedings.mlr.press/v139/zhang21s.html},\n abstract = \t {Differentiable ARchiTecture Search(DARTS) has recently become the mainstream in the neural architecture search (NAS) due to its efficiency and simplicity. With a gradient-based bi-level optimization, DARTS alternately optimizes the inner model weights and the outer architecture parameter in a weight-sharing supernet. A key challenge to the scalability and quality of the learned architectures is the need for differentiating through the inner-loop optimisation. While much has been discussed about several potentially fatal factors in DARTS, the architecture gradient, a.k.a. hypergradient, has received less attention. In this paper, we tackle the hypergradient computation in DARTS based on the implicit function theorem, making it only depends on the obtained solution to the inner-loop optimization and agnostic to the optimization path. To further reduce the computational requirements, we formulate a stochastic hypergradient approximation for differentiable NAS, and theoretically show that the architecture optimization with the proposed method is expected to converge to a stationary point. Comprehensive experiments on two NAS benchmark search spaces and the common NAS search space verify the effectiveness of our proposed method. 
It leads to architectures outperforming, with large margins, those learned by the baseline methods.}\n}", "pdf": "http://proceedings.mlr.press/v139/zhang21s/zhang21s.pdf", "supp": "", "pdf_size": 375992, "gs_citation": 88, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2918201960391178882&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Faculty of Information Technology, Monash University, Australia+Faculty of Engineering and Information Technology, University of Technology Sydney, Australia; Faculty of Engineering and Information Technology, University of Technology Sydney, Australia; Faculty of Information Technology, Monash University, Australia; Faculty of Information Technology, Monash University, Australia; Australian Institute for Machine Learning, University of Adelaide, Australia; Faculty of Information Technology, Monash University, Australia", "aff_domain": "monash.edu;uts.edu.au;monash.edu;monash.edu;adelaide.edu.au;monash.edu", "email": "monash.edu;uts.edu.au;monash.edu;monash.edu;adelaide.edu.au;monash.edu", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v139/zhang21s.html", "aff_unique_index": "0+1;1;0;0;2;0", "aff_unique_norm": "Monash University;University of Technology Sydney;University of Adelaide", "aff_unique_dep": "Faculty of Information Technology;Faculty of Engineering and Information Technology;Australian Institute for Machine Learning", "aff_unique_url": "https://www.monash.edu;https://www.uts.edu.au;https://www.adelaide.edu.au", "aff_unique_abbr": "Monash;UTS;UoA", "aff_campus_unique_index": "1;1;2", "aff_campus_unique": ";Sydney;Adelaide", "aff_country_unique_index": "0+0;0;0;0;0;0", "aff_country_unique": "Australia" } ]