[ { "title": "A Birth-Death Process for Feature Allocation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/692", "id": "692", "author_site": "Konstantina Palla, David Knowles, Zoubin Ghahramani", "author": "Konstantina Palla; David Knowles; Zoubin Ghahramani", "abstract": "We propose a Bayesian nonparametric prior over feature allocations for sequential data, the birth-death feature allocation process (BDFP). The BDFP models the evolution of the feature allocation of a set of N objects across a covariate (e.g.~time) by creating and deleting features. A BDFP is exchangeable, projective, stationary and reversible, and its equilibrium distribution is given by the Indian buffet process (IBP). We show that the Beta process on an extended space is the de Finetti mixing distribution underlying the BDFP. Finally, we present the finite approximation of the BDFP, the Beta Event Process (BEP), that permits simplified inference. The utility of the BDFP as a prior is demonstrated on real world dynamic genomics and social network data.", "bibtex": "@InProceedings{pmlr-v70-palla17a,\n title = \t {A Birth-Death Process for Feature Allocation},\n author = {Konstantina Palla and David Knowles and Zoubin Ghahramani},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2751--2759},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/palla17a/palla17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/palla17a.html},\n abstract = \t {We propose a Bayesian nonparametric prior over feature allocations for sequential data, the birth-death feature allocation process (BDFP). The BDFP models the evolution of the feature allocation of a set of N objects across a covariate (e.g.~time) by creating and deleting features. A BDFP is exchangeable, projective, stationary and reversible, and its equilibrium distribution is given by the Indian buffet process (IBP). We show that the Beta process on an extended space is the de Finetti mixing distribution underlying the BDFP. Finally, we present the finite approximation of the BDFP, the Beta Event Process (BEP), that permits simplified inference. 
The utility of the BDFP as a prior is demonstrated on real world dynamic genomics and social network data.}\n}", "pdf": "http://proceedings.mlr.press/v70/palla17a/palla17a.pdf", "supp": "", "pdf_size": 1082900, "gs_citation": 1, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13677833996601126973&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "University of Oxford, Oxford, UK; Stanford University, California, USA; University of Cambridge, Cambridge, UK + Uber AI Labs, SF, California, USA", "aff_domain": "gmail.com; ; ", "email": "gmail.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/palla17a.html", "aff_unique_index": "0;1;2+3", "aff_unique_norm": "University of Oxford;Stanford University;University of Cambridge;Uber AI Labs", "aff_unique_dep": ";;;AI Labs", "aff_unique_url": "https://www.ox.ac.uk;https://www.stanford.edu;https://www.cam.ac.uk;https://www.uber.com", "aff_unique_abbr": "Oxford;Stanford;Cambridge;Uber AI Labs", "aff_campus_unique_index": "0;1;2+3", "aff_campus_unique": "Oxford;California;Cambridge;San Francisco", "aff_country_unique_index": "0;1;0+1", "aff_country_unique": "United Kingdom;United States" }, { "title": "A Closer Look at Memorization in Deep Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/874", "id": "874", "author_site": "David Krueger, Yoshua Bengio, Stanislaw Jastrzebski, Maxinder S. Kanwal, Nicolas Ballas, Asja Fischer, Emmanuel Bengio, Devansh Arpit, Tegan Maharaj, Aaron Courville, Simon Lacoste-Julien", "author": "Devansh Arpit; Stanis\u0142aw Jastrz\u0119bski; Nicolas Ballas; David Krueger; Emmanuel Bengio; Maxinder S. Kanwal; Tegan Maharaj; Asja Fischer; Aaron Courville; Yoshua Bengio; Simon Lacoste-Julien", "abstract": "We examine the role of memorization in deep learning, drawing connections to capacity, generalization, and adversarial robustness. While deep networks are capable of memorizing noise data, our results suggest that they tend to prioritize learning simple patterns first. In our experiments, we expose qualitative differences in gradient-based optimization of deep neural networks (DNNs) on noise vs.~real data. We also demonstrate that for appropriately tuned explicit regularization (e.g.,~dropout) we can degrade DNN training performance on noise datasets without compromising generalization on real data. Our analysis suggests that the notions of effective capacity which are dataset independent are unlikely to explain the generalization performance of deep networks when trained with gradient based methods because training data itself plays an important role in determining the degree of memorization.", "bibtex": "@InProceedings{pmlr-v70-arpit17a,\n title = \t {A Closer Look at Memorization in Deep Networks},\n author = {Devansh Arpit and Stanis{\\l}aw Jastrz{\\k{e}}bski and Nicolas Ballas and David Krueger and Emmanuel Bengio and Maxinder S. 
Kanwal and Tegan Maharaj and Asja Fischer and Aaron Courville and Yoshua Bengio and Simon Lacoste-Julien},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {233--242},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/arpit17a/arpit17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/arpit17a.html},\n abstract = \t {We examine the role of memorization in deep learning, drawing connections to capacity, generalization, and adversarial robustness. While deep networks are capable of memorizing noise data, our results suggest that they tend to prioritize learning simple patterns first. In our experiments, we expose qualitative differences in gradient-based optimization of deep neural networks (DNNs) on noise vs.~real data. We also demonstrate that for appropriately tuned explicit regularization (e.g.,~dropout) we can degrade DNN training performance on noise datasets without compromising generalization on real data. Our analysis suggests that the notions of effective capacity which are dataset independent are unlikely to explain the generalization performance of deep networks when trained with gradient based methods because training data itself plays an important role in determining the degree of memorization.}\n}", "pdf": "http://proceedings.mlr.press/v70/arpit17a/arpit17a.pdf", "supp": "", "pdf_size": 3238488, "gs_citation": 2324, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=158427493479385371&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": ";;;;;;;;;;", "aff_domain": ";;;;;;;;;;", "email": ";;;;;;;;;;", "github": "", "project": "", "author_num": 11, "oa": "https://proceedings.mlr.press/v70/arpit17a.html" }, { "title": "A Distributional Perspective on Reinforcement Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/580", "id": "580", "author_site": "Marc Bellemare, Will Dabney, Remi Munos", "author": "Marc G. Bellemare; Will Dabney; R\u00e9mi Munos", "abstract": "In this paper we argue for the fundamental importance of the value distribution: the distribution of the random return received by a reinforcement learning agent. This is in contrast to the common approach to reinforcement learning which models the expectation of this return, or value. Although there is an established body of literature studying the value distribution, thus far it has always been used for a specific purpose such as implementing risk-aware behaviour. We begin with theoretical results in both the policy evaluation and control settings, exposing a significant distributional instability in the latter. We then use the distributional perspective to design a new algorithm which applies Bellman\u2019s equation to the learning of approximate value distributions. We evaluate our algorithm using the suite of games from the Arcade Learning Environment. We obtain both state-of-the-art results and anecdotal evidence demonstrating the importance of the value distribution in approximate reinforcement learning. Finally, we combine theoretical and empirical evidence to highlight the ways in which the value distribution impacts learning in the approximate setting.", "bibtex": "@InProceedings{pmlr-v70-bellemare17a,\n title = \t {A Distributional Perspective on Reinforcement Learning},\n author = {Marc G. 
Bellemare and Will Dabney and R{\\'e}mi Munos},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {449--458},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/bellemare17a/bellemare17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/bellemare17a.html},\n abstract = \t {In this paper we argue for the fundamental importance of the value distribution: the distribution of the random return received by a reinforcement learning agent. This is in contrast to the common approach to reinforcement learning which models the expectation of this return, or value. Although there is an established body of literature studying the value distribution, thus far it has always been used for a specific purpose such as implementing risk-aware behaviour. We begin with theoretical results in both the policy evaluation and control settings, exposing a significant distributional instability in the latter. We then use the distributional perspective to design a new algorithm which applies Bellman\u2019s equation to the learning of approximate value distributions. We evaluate our algorithm using the suite of games from the Arcade Learning Environment. We obtain both state-of-the-art results and anecdotal evidence demonstrating the importance of the value distribution in approximate reinforcement learning. Finally, we combine theoretical and empirical evidence to highlight the ways in which the value distribution impacts learning in the approximate setting.}\n}", "pdf": "http://proceedings.mlr.press/v70/bellemare17a/bellemare17a.pdf", "supp": "", "pdf_size": 1184718, "gs_citation": 2118, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16746050446953182873&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK", "aff_domain": "google.com; ; ", "email": "google.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/bellemare17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "A Divergence Bound for Hybrids of MCMC and Variational Inference and an Application to Langevin Dynamics and SGVI", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/825", "id": "825", "author": "Justin Domke", "abstract": "Two popular classes of methods for approximate inference are Markov chain Monte Carlo (MCMC) and variational inference. MCMC tends to be accurate if run for a long enough time, while variational inference tends to give better approximations at shorter time horizons. However, the amount of time needed for MCMC to exceed the performance of variational methods can be quite high, motivating more fine-grained tradeoffs. 
This paper derives a distribution over variational parameters, designed to minimize a bound on the divergence between the resulting marginal distribution and the target, and gives an example of how to sample from this distribution in a way that interpolates between the behavior of existing methods based on Langevin dynamics and stochastic gradient variational inference (SGVI).", "bibtex": "@InProceedings{pmlr-v70-domke17a,\n title = \t {A Divergence Bound for Hybrids of {MCMC} and Variational Inference and an Application to {L}angevin Dynamics and {SGVI}},\n author = {Justin Domke},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1029--1038},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/domke17a/domke17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/domke17a.html},\n abstract = \t {Two popular classes of methods for approximate inference are Markov chain Monte Carlo (MCMC) and variational inference. MCMC tends to be accurate if run for a long enough time, while variational inference tends to give better approximations at shorter time horizons. However, the amount of time needed for MCMC to exceed the performance of variational methods can be quite high, motivating more fine-grained tradeoffs. This paper derives a distribution over variational parameters, designed to minimize a bound on the divergence between the resulting marginal distribution and the target, and gives an example of how to sample from this distribution in a way that interpolates between the behavior of existing methods based on Langevin dynamics and stochastic gradient variational inference (SGVI).}\n}", "pdf": "http://proceedings.mlr.press/v70/domke17a/domke17a.pdf", "supp": "", "pdf_size": 4687797, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15528339222977909391&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "College of Computing and Information Sciences, University of Massachusetts, Amherst, USA", "aff_domain": "cs.umass.edu", "email": "cs.umass.edu", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v70/domke17a.html", "aff_unique_index": "0", "aff_unique_norm": "University of Massachusetts Amherst", "aff_unique_dep": "College of Computing and Information Sciences", "aff_unique_url": "https://www.umass.edu", "aff_unique_abbr": "UMass Amherst", "aff_campus_unique_index": "0", "aff_campus_unique": "Amherst", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "title": "A Laplacian Framework for Option Discovery in Reinforcement Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/577", "id": "577", "author_site": "Marlos C. Machado, Marc Bellemare, Michael Bowling", "author": "Marlos C. Machado; Marc G. Bellemare; Michael Bowling", "abstract": "Representation learning and option discovery are two of the biggest challenges in reinforcement learning (RL). Proto-value functions (PVFs) are a well-known approach for representation learning in MDPs. In this paper we address the option discovery problem by showing how PVFs implicitly define options. We do it by introducing eigenpurposes, intrinsic reward functions derived from the learned representations. 
The options discovered from eigenpurposes traverse the principal directions of the state space. They are useful for multiple tasks because they are discovered without taking the environment\u2019s rewards into consideration. Moreover, different options act at different time scales, making them helpful for exploration. We demonstrate features of eigenpurposes in traditional tabular domains as well as in Atari 2600 games.", "bibtex": "@InProceedings{pmlr-v70-machado17a,\n title = \t {A {L}aplacian Framework for Option Discovery in Reinforcement Learning},\n author = {Marlos C. Machado and Marc G. Bellemare and Michael Bowling},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2295--2304},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/machado17a/machado17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/machado17a.html},\n abstract = \t {Representation learning and option discovery are two of the biggest challenges in reinforcement learning (RL). Proto-value functions (PVFs) are a well-known approach for representation learning in MDPs. In this paper we address the option discovery problem by showing how PVFs implicitly define options. We do it by introducing eigenpurposes, intrinsic reward functions derived from the learned representations. The options discovered from eigenpurposes traverse the principal directions of the state space. They are useful for multiple tasks because they are discovered without taking the environment\u2019s rewards into consideration. Moreover, different options act at different time scales, making them helpful for exploration. We demonstrate features of eigenpurposes in traditional tabular domains as well as in Atari 2600 games.}\n}", "pdf": "http://proceedings.mlr.press/v70/machado17a/machado17a.pdf", "supp": "", "pdf_size": 2620231, "gs_citation": 334, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4247433477106185827&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "University of Alberta; Google DeepMind; University of Alberta", "aff_domain": "ualberta.ca; ; ", "email": "ualberta.ca; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/machado17a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Alberta;Google", "aff_unique_dep": ";Google DeepMind", "aff_unique_url": "https://www.ualberta.ca;https://deepmind.com", "aff_unique_abbr": "UAlberta;DeepMind", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0", "aff_country_unique": "Canada;United Kingdom" }, { "title": "A Richer Theory of Convex Constrained Optimization with Reduced Projections and Improved Rates", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/511", "id": "511", "author_site": "Tianbao Yang, Qihang Lin, Lijun Zhang", "author": "Tianbao Yang; Qihang Lin; Lijun Zhang", "abstract": "This paper focuses on convex constrained optimization problems, where the solution is subject to a convex inequality constraint. In particular, we aim at challenging problems for which both projection into the constrained domain and a linear optimization under the inequality constraint are time-consuming, which render both projected gradient methods and conditional gradient methods (a.k.a. 
the Frank-Wolfe algorithm) expensive. In this paper, we develop projection reduced optimization algorithms for both smooth and non-smooth optimization with improved convergence rates under a certain regularity condition of the constraint function. We first present a general theory of optimization with only one projection. Its application to smooth optimization with only one projection yields $O(1/\\epsilon)$ iteration complexity, which improves over the $O(1/\\epsilon^2)$ iteration complexity established before for non-smooth optimization and can be further reduced under strong convexity. Then we introduce a local error bound condition and develop faster algorithms for non-strongly convex optimization at the price of a logarithmic number of projections. In particular, we achieve an iteration complexity of $\\widetilde O(1/\\epsilon^{2(1-\\theta)})$ for non-smooth optimization and $\\widetilde O(1/\\epsilon^{1-\\theta})$ for smooth optimization, where $\\theta\\in(0,1]$ appearing the local error bound condition characterizes the functional local growth rate around the optimal solutions. Novel applications in solving the constrained $\\ell_1$ minimization problem and a positive semi-definite constrained distance metric learning problem demonstrate that the proposed algorithms achieve significant speed-up compared with previous algorithms.", "bibtex": "@InProceedings{pmlr-v70-yang17f,\n title = \t {A Richer Theory of Convex Constrained Optimization with Reduced Projections and Improved Rates},\n author = {Tianbao Yang and Qihang Lin and Lijun Zhang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3901--3910},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/yang17f/yang17f.pdf},\n url = \t {https://proceedings.mlr.press/v70/yang17f.html},\n abstract = \t {This paper focuses on convex constrained optimization problems, where the solution is subject to a convex inequality constraint. In particular, we aim at challenging problems for which both projection into the constrained domain and a linear optimization under the inequality constraint are time-consuming, which render both projected gradient methods and conditional gradient methods (a.k.a. the Frank-Wolfe algorithm) expensive. In this paper, we develop projection reduced optimization algorithms for both smooth and non-smooth optimization with improved convergence rates under a certain regularity condition of the constraint function. We first present a general theory of optimization with only one projection. Its application to smooth optimization with only one projection yields $O(1/\\epsilon)$ iteration complexity, which improves over the $O(1/\\epsilon^2)$ iteration complexity established before for non-smooth optimization and can be further reduced under strong convexity. Then we introduce a local error bound condition and develop faster algorithms for non-strongly convex optimization at the price of a logarithmic number of projections. In particular, we achieve an iteration complexity of $\\widetilde O(1/\\epsilon^{2(1-\\theta)})$ for non-smooth optimization and $\\widetilde O(1/\\epsilon^{1-\\theta})$ for smooth optimization, where $\\theta\\in(0,1]$ appearing the local error bound condition characterizes the functional local growth rate around the optimal solutions. 
Novel applications in solving the constrained $\\ell_1$ minimization problem and a positive semi-definite constrained distance metric learning problem demonstrate that the proposed algorithms achieve significant speed-up compared with previous algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v70/yang17f/yang17f.pdf", "supp": "", "pdf_size": 339133, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=161425123499175116&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "The University of Iowa, Iowa City, IA 52242, USA; The University of Iowa, Iowa City, IA 52242, USA; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China", "aff_domain": "uiowa.edu; ; ", "email": "uiowa.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/yang17f.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of Iowa;Nanjing University", "aff_unique_dep": ";National Key Laboratory for Novel Software Technology", "aff_unique_url": "https://www.uiowa.edu;http://www.nju.edu.cn", "aff_unique_abbr": "UIowa;Nanjing U", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Iowa City;Nanjing", "aff_country_unique_index": "0;0;1", "aff_country_unique": "United States;China" }, { "title": "A Semismooth Newton Method for Fast, Generic Convex Programming", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/512", "id": "512", "author_site": "Alnur Ali, Eric Wong, Zico Kolter", "author": "Alnur Ali; Eric Wong; J. Zico Kolter", "abstract": "We introduce Newton-ADMM, a method for fast conic optimization. The basic idea is to view the residuals of consecutive iterates generated by the alternating direction method of multipliers (ADMM) as a set of fixed point equations, and then use a nonsmooth Newton method to find a solution; we apply the basic idea to the Splitting Cone Solver (SCS), a state-of-the-art method for solving generic conic optimization problems. We demonstrate theoretically, by extending the theory of semismooth operators, that Newton-ADMM converges rapidly (i.e., quadratically) to a solution; empirically, Newton-ADMM is significantly faster than SCS on a number of problems. The method also has essentially no tuning parameters, generates certificates of primal or dual infeasibility, when appropriate, and can be specialized to solve specific convex problems.", "bibtex": "@InProceedings{pmlr-v70-ali17a,\n title = \t {A Semismooth {N}ewton Method for Fast, Generic Convex Programming},\n author = {Alnur Ali and Eric Wong and J. Zico Kolter},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {70--79},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/ali17a/ali17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/ali17a.html},\n abstract = \t {We introduce Newton-ADMM, a method for fast conic optimization. The basic idea is to view the residuals of consecutive iterates generated by the alternating direction method of multipliers (ADMM) as a set of fixed point equations, and then use a nonsmooth Newton method to find a solution; we apply the basic idea to the Splitting Cone Solver (SCS), a state-of-the-art method for solving generic conic optimization problems. 
We demonstrate theoretically, by extending the theory of semismooth operators, that Newton-ADMM converges rapidly (i.e., quadratically) to a solution; empirically, Newton-ADMM is significantly faster than SCS on a number of problems. The method also has essentially no tuning parameters, generates certificates of primal or dual infeasibility, when appropriate, and can be specialized to solve specific convex problems.}\n}", "pdf": "http://proceedings.mlr.press/v70/ali17a/ali17a.pdf", "supp": "", "pdf_size": 783603, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6167685795746530291&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Machine Learning Department, Carnegie Mellon University; Machine Learning Department, Carnegie Mellon University; Computer Science Department, Carnegie Mellon University", "aff_domain": "cmu.edu; ; ", "email": "cmu.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/ali17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "Machine Learning Department", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "A Simple Multi-Class Boosting Framework with Theoretical Guarantees and Empirical Proficiency", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/675", "id": "675", "author_site": "Ron Appel, Pietro Perona", "author": "Ron Appel; Pietro Perona", "abstract": "There is a need for simple yet accurate white-box learning systems that train quickly and with little data. To this end, we showcase REBEL, a multi-class boosting method, and present a novel family of weak learners called localized similarities. Our framework provably minimizes the training error of any dataset at an exponential rate. We carry out experiments on a variety of synthetic and real datasets, demonstrating a consistent tendency to avoid overfitting. We evaluate our method on MNIST and standard UCI datasets against other state-of-the-art methods, showing the empirical proficiency of our method.", "bibtex": "@InProceedings{pmlr-v70-appel17a,\n title = \t {A Simple Multi-Class Boosting Framework with Theoretical Guarantees and Empirical Proficiency},\n author = {Ron Appel and Pietro Perona},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {186--194},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/appel17a/appel17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/appel17a.html},\n abstract = \t {There is a need for simple yet accurate white-box learning systems that train quickly and with little data. To this end, we showcase REBEL, a multi-class boosting method, and present a novel family of weak learners called localized similarities. Our framework provably minimizes the training error of any dataset at an exponential rate. We carry out experiments on a variety of synthetic and real datasets, demonstrating a consistent tendency to avoid overfitting. 
We evaluate our method on MNIST and standard UCI datasets against other state-of-the-art methods, showing the empirical proficiency of our method.}\n}", "pdf": "http://proceedings.mlr.press/v70/appel17a/appel17a.pdf", "supp": "", "pdf_size": 2387518, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9219102443582525993&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Caltech, Pasadena, USA; Caltech, Pasadena, USA", "aff_domain": "vision.caltech.edu;vision.caltech.edu", "email": "vision.caltech.edu;vision.caltech.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/appel17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "California Institute of Technology", "aff_unique_dep": "", "aff_unique_url": "https://www.caltech.edu", "aff_unique_abbr": "Caltech", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Pasadena", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "A Simulated Annealing Based Inexact Oracle for Wasserstein Loss Minimization", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/585", "id": "585", "author_site": "Jianbo Ye, James Wang, Jia Li", "author": "Jianbo Ye; James Z. Wang; Jia Li", "abstract": "Learning under a Wasserstein loss, a.k.a. Wasserstein loss minimization (WLM), is an emerging research topic for gaining insights from a large set of structured objects. Despite being conceptually simple, WLM problems are computationally challenging because they involve minimizing over functions of quantities (i.e. Wasserstein distances) that themselves require numerical algorithms to compute. In this paper, we introduce a stochastic approach based on simulated annealing for solving WLMs. Particularly, we have developed a Gibbs sampler to approximate effectively and efficiently the partial gradients of a sequence of Wasserstein losses. Our new approach has the advantages of numerical stability and readiness for warm starts. These characteristics are valuable for WLM problems that often require multiple levels of iterations in which the oracle for computing the value and gradient of a loss function is embedded. We applied the method to optimal transport with Coulomb cost and the Wasserstein non-negative matrix factorization problem, and made comparisons with the existing method of entropy regularization.", "bibtex": "@InProceedings{pmlr-v70-ye17b,\n title = \t {A Simulated Annealing Based Inexact Oracle for {W}asserstein Loss Minimization},\n author = {Jianbo Ye and James Z. Wang and Jia Li},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3940--3948},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/ye17b/ye17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/ye17b.html},\n abstract = \t {Learning under a Wasserstein loss, a.k.a. Wasserstein loss minimization (WLM), is an emerging research topic for gaining insights from a large set of structured objects. Despite being conceptually simple, WLM problems are computationally challenging because they involve minimizing over functions of quantities (i.e. Wasserstein distances) that themselves require numerical algorithms to compute. In this paper, we introduce a stochastic approach based on simulated annealing for solving WLMs. 
Particularly, we have developed a Gibbs sampler to approximate effectively and efficiently the partial gradients of a sequence of Wasserstein losses. Our new approach has the advantages of numerical stability and readiness for warm starts. These characteristics are valuable for WLM problems that often require multiple levels of iterations in which the oracle for computing the value and gradient of a loss function is embedded. We applied the method to optimal transport with Coulomb cost and the Wasserstein non-negative matrix factorization problem, and made comparisons with the existing method of entropy regularization.}\n}", "pdf": "http://proceedings.mlr.press/v70/ye17b/ye17b.pdf", "supp": "", "pdf_size": 2400189, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6270763475025800871&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "College of Information Sciences and Technology, The Pennsylvania State University, University Park, PA; College of Information Sciences and Technology, The Pennsylvania State University, University Park, PA; Department of Statistics, The Pennsylvania State University, University Park, PA", "aff_domain": "ist.psu.edu; ; ", "email": "ist.psu.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/ye17b.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Pennsylvania State University", "aff_unique_dep": "College of Information Sciences and Technology", "aff_unique_url": "https://www.psu.edu", "aff_unique_abbr": "PSU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "University Park", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "A Unified Maximum Likelihood Approach for Estimating Symmetric Properties of Discrete Distributions", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/811", "id": "811", "author_site": "Jayadev Acharya, Hirakendu Das, Alon Orlitsky, Ananda Suresh", "author": "Jayadev Acharya; Hirakendu Das; Alon Orlitsky; Ananda Theertha Suresh", "abstract": "Symmetric distribution properties such as support size, support coverage, entropy, and proximity to uniformity, arise in many applications. Recently, researchers applied different estimators and analysis tools to derive asymptotically sample-optimal approximations for each of these properties. We show that a single, simple, plug-in estimator\u2014", "bibtex": "@InProceedings{pmlr-v70-acharya17a,\n title = \t {A Unified Maximum Likelihood Approach for Estimating Symmetric Properties of Discrete Distributions},\n author = {Jayadev Acharya and Hirakendu Das and Alon Orlitsky and Ananda Theertha Suresh},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {11--21},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/acharya17a/acharya17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/acharya17a.html},\n abstract = \t {Symmetric distribution properties such as support size, support coverage, entropy, and proximity to uniformity, arise in many applications. Recently, researchers applied different estimators and analysis tools to derive asymptotically sample-optimal approximations for each of these properties. 
We show that a single, simple, plug-in estimator\u2014", "pdf": "http://proceedings.mlr.press/v70/acharya17a/acharya17a.pdf", "supp": "", "pdf_size": 489365, "gs_citation": 37, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15953091848215800274&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Cornell University; Yahoo Inc!; University of California, San Diego; Google Research", "aff_domain": "cornell.edu;yahoo-inc.com;ucsd.edu;google.com", "email": "cornell.edu;yahoo-inc.com;ucsd.edu;google.com", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/acharya17a.html", "aff_unique_index": "0;1;2;3", "aff_unique_norm": "Cornell University;Yahoo;University of California, San Diego;Google", "aff_unique_dep": ";;;Google Research", "aff_unique_url": "https://www.cornell.edu;https://www.yahoo.com;https://www.ucsd.edu;https://research.google", "aff_unique_abbr": "Cornell;Yahoo;UCSD;Google Research", "aff_campus_unique_index": "1;2", "aff_campus_unique": ";San Diego;Mountain View", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "A Unified Variance Reduction-Based Framework for Nonconvex Low-Rank Matrix Recovery", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/624", "id": "624", "author_site": "Lingxiao Wang, Xiao Zhang, Quanquan Gu", "author": "Lingxiao Wang; Xiao Zhang; Quanquan Gu", "abstract": "We propose a generic framework based on a new stochastic variance-reduced gradient descent algorithm for accelerating nonconvex low-rank matrix recovery. Starting from an appropriate initial estimator, our proposed algorithm performs projected gradient descent based on a novel semi-stochastic gradient specifically designed for low-rank matrix recovery. Based upon the mild restricted strong convexity and smoothness conditions, we derive a projected notion of the restricted Lipschitz continuous gradient property, and prove that our algorithm enjoys linear convergence rate to the unknown low-rank matrix with an improved computational complexity. Moreover, our algorithm can be employed to both noiseless and noisy observations, where the (near) optimal sample complexity and statistical rate can be attained respectively. We further illustrate the superiority of our generic framework through several specific examples, both theoretically and experimentally.", "bibtex": "@InProceedings{pmlr-v70-wang17n,\n title = \t {A Unified Variance Reduction-Based Framework for Nonconvex Low-Rank Matrix Recovery},\n author = {Lingxiao Wang and Xiao Zhang and Quanquan Gu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3712--3721},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/wang17n/wang17n.pdf},\n url = \t {https://proceedings.mlr.press/v70/wang17n.html},\n abstract = \t {We propose a generic framework based on a new stochastic variance-reduced gradient descent algorithm for accelerating nonconvex low-rank matrix recovery. Starting from an appropriate initial estimator, our proposed algorithm performs projected gradient descent based on a novel semi-stochastic gradient specifically designed for low-rank matrix recovery. 
Based upon the mild restricted strong convexity and smoothness conditions, we derive a projected notion of the restricted Lipschitz continuous gradient property, and prove that our algorithm enjoys linear convergence rate to the unknown low-rank matrix with an improved computational complexity. Moreover, our algorithm can be employed to both noiseless and noisy observations, where the (near) optimal sample complexity and statistical rate can be attained respectively. We further illustrate the superiority of our generic framework through several specific examples, both theoretically and experimentally.}\n}", "pdf": "http://proceedings.mlr.press/v70/wang17n/wang17n.pdf", "supp": "", "pdf_size": 701521, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10922883488811000821&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, University of Virginia, Charlottesville, Virginia, USA; Department of Computer Science, University of Virginia, Charlottesville, Virginia, USA; Department of Computer Science, University of Virginia, Charlottesville, Virginia, USA", "aff_domain": "virginia.edu; ;virginia.edu", "email": "virginia.edu; ;virginia.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/wang17n.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Virginia", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.virginia.edu", "aff_unique_abbr": "UVA", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Charlottesville", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "A Unified View of Multi-Label Performance Measures", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/751", "id": "751", "author_site": "Xi-Zhu Wu, Zhi-Hua Zhou", "author": "Xi-Zhu Wu; Zhi-Hua Zhou", "abstract": "Multi-label classification deals with the problem where each instance is associated with multiple class labels. Because evaluation in multi-label classification is more complicated than single-label setting, a number of performance measures have been proposed. It is noticed that an algorithm usually performs differently on different measures. Therefore, it is important to understand which algorithms perform well on which measure(s) and why. In this paper, we propose a unified margin view to revisit eleven performance measures in multi-label classification. In particular, we define label-wise margin and instance-wise margin, and prove that through maximizing these margins, different corresponding performance measures are to be optimized. Based on the defined margins, a max-margin approach called LIMO is designed and empirical results validate our theoretical findings.", "bibtex": "@InProceedings{pmlr-v70-wu17a,\n title = \t {A Unified View of Multi-Label Performance Measures},\n author = {Xi-Zhu Wu and Zhi-Hua Zhou},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3780--3788},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/wu17a/wu17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/wu17a.html},\n abstract = \t {Multi-label classification deals with the problem where each instance is associated with multiple class labels. 
Because evaluation in multi-label classification is more complicated than single-label setting, a number of performance measures have been proposed. It is noticed that an algorithm usually performs differently on different measures. Therefore, it is important to understand which algorithms perform well on which measure(s) and why. In this paper, we propose a unified margin view to revisit eleven performance measures in multi-label classification. In particular, we define label-wise margin and instance-wise margin, and prove that through maximizing these margins, different corresponding performance measures are to be optimized. Based on the defined margins, a max-margin approach called LIMO is designed and empirical results validate our theoretical findings.}\n}", "pdf": "http://proceedings.mlr.press/v70/wu17a/wu17a.pdf", "supp": "", "pdf_size": 577675, "gs_citation": 310, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16547716232673594226&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China", "aff_domain": "lamda.nju.edu.cn;lamda.nju.edu.cn", "email": "lamda.nju.edu.cn;lamda.nju.edu.cn", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/wu17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Nanjing University", "aff_unique_dep": "National Key Laboratory for Novel Software Technology", "aff_unique_url": "http://www.nju.edu.cn", "aff_unique_abbr": "Nanjing U", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Nanjing", "aff_country_unique_index": "0;0", "aff_country_unique": "China" }, { "title": "Accelerating Eulerian Fluid Simulation With Convolutional Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/549", "id": "549", "author_site": "Jonathan Tompson, Kristofer D Schlachter, Pablo Sprechmann, Ken Perlin", "author": "Jonathan Tompson; Kristofer Schlachter; Pablo Sprechmann; Ken Perlin", "abstract": "Efficient simulation of the Navier-Stokes equations for fluid flow is a long standing problem in applied mathematics, for which state-of-the-art methods require large compute resources. In this work, we propose a data-driven approach that leverages the approximation power of deep-learning with the precision of standard solvers to obtain fast and highly realistic simulations. Our method solves the incompressible Euler equations using the standard operator splitting method, in which a large sparse linear system with many free parameters must be solved. We use a Convolutional Network with a highly tailored architecture, trained using a novel unsupervised learning framework to solve the linear system. 
We present real-time 2D and 3D simulations that outperform recently proposed data-driven methods; the obtained results are realistic and show good generalization properties.", "bibtex": "@InProceedings{pmlr-v70-tompson17a,\n title = \t {Accelerating {E}ulerian Fluid Simulation With Convolutional Networks},\n author = {Jonathan Tompson and Kristofer Schlachter and Pablo Sprechmann and Ken Perlin},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3424--3433},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/tompson17a/tompson17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/tompson17a.html},\n abstract = \t {Efficient simulation of the Navier-Stokes equations for fluid flow is a long standing problem in applied mathematics, for which state-of-the-art methods require large compute resources. In this work, we propose a data-driven approach that leverages the approximation power of deep-learning with the precision of standard solvers to obtain fast and highly realistic simulations. Our method solves the incompressible Euler equations using the standard operator splitting method, in which a large sparse linear system with many free parameters must be solved. We use a Convolutional Network with a highly tailored architecture, trained using a novel unsupervised learning framework to solve the linear system. We present real-time 2D and 3D simulations that outperform recently proposed data-driven methods; the obtained results are realistic and show good generalization properties.}\n}", "pdf": "http://proceedings.mlr.press/v70/tompson17a/tompson17a.pdf", "supp": "", "pdf_size": 4905830, "gs_citation": 741, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6898944616119094845&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Google Brain, Mountain View, USA; New York University, New York, USA; New York University, New York, USA + Google Deepmind, London, UK; New York University, New York, USA", "aff_domain": "google.com; ; ; ", "email": "google.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/tompson17a.html", "aff_unique_index": "0;1;1+0;1", "aff_unique_norm": "Google;New York University", "aff_unique_dep": "Google Brain;", "aff_unique_url": "https://brain.google.com;https://www.nyu.edu", "aff_unique_abbr": "Google Brain;NYU", "aff_campus_unique_index": "0;1;1+2;1", "aff_campus_unique": "Mountain View;New York;London", "aff_country_unique_index": "0;0;0+1;0", "aff_country_unique": "United States;United Kingdom" }, { "title": "Active Heteroscedastic Regression", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/785", "id": "785", "author_site": "Kamalika Chaudhuri, Prateek Jain, Nagarajan Natarajan", "author": "Kamalika Chaudhuri; Prateek Jain; Nagarajan Natarajan", "abstract": "An active learner is given a model class $\\Theta$, a large sample of unlabeled data drawn from an underlying distribution and access to a labeling oracle that can provide a label for any of the unlabeled instances. The goal of the learner is to find a model $\\theta \\in \\Theta$ that fits the data to a given accuracy while making as few label queries to the oracle as possible. 
In this work, we consider a theoretical analysis of the label requirement of active learning for regression under a heteroscedastic noise model, where the noise depends on the instance. We provide bounds on the convergence rates of active and passive learning for heteroscedastic regression. Our results illustrate that just like in binary classification, some partial knowledge of the nature of the noise can lead to significant gains in the label requirement of active learning.", "bibtex": "@InProceedings{pmlr-v70-chaudhuri17a,\n title = \t {Active Heteroscedastic Regression},\n author = {Kamalika Chaudhuri and Prateek Jain and Nagarajan Natarajan},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {694--702},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/chaudhuri17a/chaudhuri17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/chaudhuri17a.html},\n abstract = \t {An active learner is given a model class $\\Theta$, a large sample of unlabeled data drawn from an underlying distribution and access to a labeling oracle that can provide a label for any of the unlabeled instances. The goal of the learner is to find a model $\\theta \\in \\Theta$ that fits the data to a given accuracy while making as few label queries to the oracle as possible. In this work, we consider a theoretical analysis of the label requirement of active learning for regression under a heteroscedastic noise model, where the noise depends on the instance. We provide bounds on the convergence rates of active and passive learning for heteroscedastic regression. Our results illustrate that just like in binary classification, some partial knowledge of the nature of the noise can lead to significant gains in the label requirement of active learning.}\n}", "pdf": "http://proceedings.mlr.press/v70/chaudhuri17a/chaudhuri17a.pdf", "supp": "", "pdf_size": 302168, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15260894680089258133&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "University of California, San Diego; Microsoft Research, India; Microsoft Research, India", "aff_domain": "ucsd.edu;microsoft.com;microsoft.com", "email": "ucsd.edu;microsoft.com;microsoft.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/chaudhuri17a.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "University of California, San Diego;Microsoft", "aff_unique_dep": ";Microsoft Research", "aff_unique_url": "https://www.ucsd.edu;https://www.microsoft.com/en-us/research/group/india.aspx", "aff_unique_abbr": "UCSD;MSR India", "aff_campus_unique_index": "0", "aff_campus_unique": "San Diego;", "aff_country_unique_index": "0;1;1", "aff_country_unique": "United States;India" }, { "title": "Active Learning for Accurate Estimation of Linear Models", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/483", "id": "483", "author_site": "Carlos Riquelme Ruiz, Mohammad Ghavamzadeh, Alessandro Lazaric", "author": "Carlos Riquelme; Mohammad Ghavamzadeh; Alessandro Lazaric", "abstract": "We explore the sequential decision making problem where the goal is to estimate uniformly well a number of linear models, given a shared budget of random contexts independently sampled from a known distribution. 
The decision maker must query one of the linear models for each incoming context, and receives an observation corrupted by noise levels that are unknown, and depend on the model instance. We present Trace-UCB, an adaptive allocation algorithm that learns the noise levels while balancing contexts accordingly across the different linear functions, and derive guarantees for simple regret in both expectation and high-probability. Finally, we extend the algorithm and its guarantees to high dimensional settings, where the number of linear models times the dimension of the contextual space is higher than the total budget of samples. Simulations with real data suggest that Trace-UCB is remarkably robust, outperforming a number of baselines even when its assumptions are violated.", "bibtex": "@InProceedings{pmlr-v70-riquelme17a,\n title = \t {Active Learning for Accurate Estimation of Linear Models},\n author = {Carlos Riquelme and Mohammad Ghavamzadeh and Alessandro Lazaric},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2931--2939},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/riquelme17a/riquelme17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/riquelme17a.html},\n abstract = \t {We explore the sequential decision making problem where the goal is to estimate uniformly well a number of linear models, given a shared budget of random contexts independently sampled from a known distribution. The decision maker must query one of the linear models for each incoming context, and receives an observation corrupted by noise levels that are unknown, and depend on the model instance. We present Trace-UCB, an adaptive allocation algorithm that learns the noise levels while balancing contexts accordingly across the different linear functions, and derive guarantees for simple regret in both expectation and high-probability. Finally, we extend the algorithm and its guarantees to high dimensional settings, where the number of linear models times the dimension of the contextual space is higher than the total budget of samples. 
Simulations with real data suggest that Trace-UCB is remarkably robust, outperforming a number of baselines even when its assumptions are violated.}\n}", "pdf": "http://proceedings.mlr.press/v70/riquelme17a/riquelme17a.pdf", "supp": "", "pdf_size": 4212461, "gs_citation": 19, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17634854255575080773&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Stanford University; DeepMind + Adobe Research; Inria Lille", "aff_domain": "stanford.edu; ; ", "email": "stanford.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/riquelme17a.html", "aff_unique_index": "0;1+2;3", "aff_unique_norm": "Stanford University;DeepMind;Adobe;INRIA", "aff_unique_dep": ";;Adobe Research;", "aff_unique_url": "https://www.stanford.edu;https://deepmind.com;https://research.adobe.com;https://www.inria.fr", "aff_unique_abbr": "Stanford;DeepMind;Adobe;Inria", "aff_campus_unique_index": "0;;2", "aff_campus_unique": "Stanford;;Lille", "aff_country_unique_index": "0;1+0;2", "aff_country_unique": "United States;United Kingdom;France" }, { "title": "Active Learning for Cost-Sensitive Classification", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/795", "id": "795", "author_site": "Akshay Krishnamurthy, Alekh Agarwal, Tzu-Kuo Huang, Hal Daum\u00e9 III, John Langford", "author": "Akshay Krishnamurthy; Alekh Agarwal; Tzu-Kuo Huang; Hal Daum\u00e9 III; John Langford", "abstract": "We design an active learning algorithm for cost-sensitive multiclass classification: problems where different errors have different costs. Our algorithm, COAL, makes predictions by regressing to each label\u2019s cost and predicting the smallest. On a new example, it uses a set of regressors that perform well on past data to estimate possible costs for each label. It queries only the labels that could be the best, ignoring the sure losers. We prove COAL can be efficiently implemented for any regression family that admits squared loss optimization; it also enjoys strong guarantees with respect to predictive performance and labeling effort. Our experiment with COAL show significant improvements in labeling effort and test cost over passive and active baselines.", "bibtex": "@InProceedings{pmlr-v70-krishnamurthy17a,\n title = \t {Active Learning for Cost-Sensitive Classification},\n author = {Akshay Krishnamurthy and Alekh Agarwal and Tzu-Kuo Huang and Daum{\\'e}, III, Hal and John Langford},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1915--1924},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/krishnamurthy17a/krishnamurthy17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/krishnamurthy17a.html},\n abstract = \t {We design an active learning algorithm for cost-sensitive multiclass classification: problems where different errors have different costs. Our algorithm, COAL, makes predictions by regressing to each label\u2019s cost and predicting the smallest. On a new example, it uses a set of regressors that perform well on past data to estimate possible costs for each label. It queries only the labels that could be the best, ignoring the sure losers. 
We prove COAL can be efficiently implemented for any regression family that admits squared loss optimization; it also enjoys strong guarantees with respect to predictive performance and labeling effort. Our experiments with COAL show significant improvements in labeling effort and test cost over passive and active baselines.}\n}", "pdf": "http://proceedings.mlr.press/v70/krishnamurthy17a/krishnamurthy17a.pdf", "supp": "", "pdf_size": 834774, "gs_citation": 114, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5402359100618907030&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "University of Massachusetts, Amherst, MA; Microsoft Research, New York, NY; Uber Advanced Technology Center, Pittsburgh, PA; University of Maryland, College Park, MD; Microsoft Research, New York, NY", "aff_domain": "cs.umass.edu; ; ; ; ", "email": "cs.umass.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/krishnamurthy17a.html", "aff_unique_index": "0;1;2;3;1", "aff_unique_norm": "University of Massachusetts Amherst;Microsoft;Uber;University of Maryland", "aff_unique_dep": ";Microsoft Research;Advanced Technology Center;", "aff_unique_url": "https://www.umass.edu;https://www.microsoft.com/en-us/research;https://www.uber.com;https://www.umd.edu", "aff_unique_abbr": "UMass Amherst;MSR;Uber ATC;UMD", "aff_campus_unique_index": "0;1;2;3;1", "aff_campus_unique": "Amherst;New York;Pittsburgh;College Park", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Active Learning for Top-$K$ Rank Aggregation from Noisy Comparisons", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/819", "id": "819", "author_site": "Soheil Mohajer, Changho Suh, Adel Elmahdy", "author": "Soheil Mohajer; Changho Suh; Adel Elmahdy", "abstract": "We explore an active top-$K$ ranking problem based on pairwise comparisons that are collected possibly in a sequential manner as per our design choice. We consider two settings: (1)", "bibtex": "@InProceedings{pmlr-v70-mohajer17a,\n title = \t {Active Learning for Top-$K$ Rank Aggregation from Noisy Comparisons},\n author = {Soheil Mohajer and Changho Suh and Adel Elmahdy},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2488--2497},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/mohajer17a/mohajer17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/mohajer17a.html},\n abstract = \t {We explore an active top-$K$ ranking problem based on pairwise comparisons that are collected possibly in a sequential manner as per our design choice. 
We consider two settings: (1)", "pdf": "http://proceedings.mlr.press/v70/mohajer17a/mohajer17a.pdf", "supp": "", "pdf_size": 494672, "gs_citation": 61, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13740050827966847328&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "ECE, University of Minnesota, Twin Cities, MN, USA; EE, KAIST, Daejeon, South Korea; ECE, University of Minnesota, Twin Cities, MN, USA", "aff_domain": "umn.edu;kaist.ac.kr;umn.edu", "email": "umn.edu;kaist.ac.kr;umn.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/mohajer17a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Minnesota;Korea Advanced Institute of Science and Technology", "aff_unique_dep": "Electrical and Computer Engineering;Department of Electrical Engineering", "aff_unique_url": "https://www.umn.edu;https://www.kaist.ac.kr", "aff_unique_abbr": "UMN;KAIST", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Twin Cities;Daejeon", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United States;South Korea" }, { "title": "AdaNet: Adaptive Structural Learning of Artificial Neural Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/682", "id": "682", "author_site": "Corinna Cortes, Xavi Gonzalvo, Vitaly Kuznetsov, Mehryar Mohri, Scott Yang", "author": "Corinna Cortes; Xavier Gonzalvo; Vitaly Kuznetsov; Mehryar Mohri; Scott Yang", "abstract": "We present a new framework for analyzing and learning artificial neural networks. Our approach simultaneously and adaptively learns both the structure of the network as well as its weights. The methodology is based upon and accompanied by strong data-dependent theoretical learning guarantees, so that the final network architecture provably adapts to the complexity of any given problem.", "bibtex": "@InProceedings{pmlr-v70-cortes17a,\n title = \t {{A}da{N}et: Adaptive Structural Learning of Artificial Neural Networks},\n author = {Corinna Cortes and Xavier Gonzalvo and Vitaly Kuznetsov and Mehryar Mohri and Scott Yang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {874--883},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/cortes17a/cortes17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/cortes17a.html},\n abstract = \t {We present a new framework for analyzing and learning artificial neural networks. Our approach simultaneously and adaptively learns both the structure of the network as well as its weights. 
The methodology is based upon and accompanied by strong data-dependent theoretical learning guarantees, so that the final network architecture provably adapts to the complexity of any given problem.}\n}", "pdf": "http://proceedings.mlr.press/v70/cortes17a/cortes17a.pdf", "supp": "", "pdf_size": 465041, "gs_citation": 379, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10315414126445079972&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Google Research, New York, NY, USA; Google Research, New York, NY, USA; Google Research, New York, NY, USA; Courant Institute of Mathematical Sciences, New York, NY, USA; Courant Institute of Mathematical Sciences, New York, NY, USA", "aff_domain": "google.com; ; ; ; ", "email": "google.com; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/cortes17a.html", "aff_unique_index": "0;0;0;1;1", "aff_unique_norm": "Google;Courant Institute of Mathematical Sciences", "aff_unique_dep": "Google Research;Mathematical Sciences", "aff_unique_url": "https://research.google;https://courant.nyu.edu", "aff_unique_abbr": "Google Research;Courant", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "New York", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Adapting Kernel Representations Online Using Submodular Maximization", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/668", "id": "668", "author_site": "Matthew Schlegel, Yangchen Pan, Jiecao Chen, Martha White", "author": "Matthew Schlegel; Yangchen Pan; Jiecao Chen; Martha White", "abstract": "Kernel representations provide a nonlinear representation, through similarities to prototypes, but require only simple linear learning algorithms given those prototypes. In a continual learning setting, with a constant stream of observations, it is critical to have an efficient mechanism for sub-selecting prototypes amongst observations. In this work, we develop an approximately submodular criterion for this setting, and an efficient online greedy submodular maximization algorithm for optimizing the criterion. We extend streaming submodular maximization algorithms to continual learning, by removing the need for multiple passes\u2014which is infeasible\u2014and instead introducing the idea of coverage time. We propose a general block-diagonal approximation for the greedy update with our criterion, that enables updates linear in the number of prototypes. We empirically demonstrate the effectiveness of this approximation, in terms of approximation quality, significant runtime improvements, and effective prediction performance.", "bibtex": "@InProceedings{pmlr-v70-schlegel17a,\n title = \t {Adapting Kernel Representations Online Using Submodular Maximization},\n author = {Matthew Schlegel and Yangchen Pan and Jiecao Chen and Martha White},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3037--3046},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/schlegel17a/schlegel17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/schlegel17a.html},\n abstract = \t {Kernel representations provide a nonlinear representation, through similarities to prototypes, but require only simple linear learning algorithms given those prototypes. 
In a continual learning setting, with a constant stream of observations, it is critical to have an efficient mechanism for sub-selecting prototypes amongst observations. In this work, we develop an approximately submodular criterion for this setting, and an efficient online greedy submodular maximization algorithm for optimizing the criterion. We extend streaming submodular maximization algorithms to continual learning, by removing the need for multiple passes\u2014which is infeasible\u2014and instead introducing the idea of coverage time. We propose a general block-diagonal approximation for the greedy update with our criterion, that enables updates linear in the number of prototypes. We empirically demonstrate the effectiveness of this approximation, in terms of approximation quality, significant runtime improvements, and effective prediction performance.}\n}", "pdf": "http://proceedings.mlr.press/v70/schlegel17a/schlegel17a.pdf", "supp": "", "pdf_size": 900472, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1208231544483246124&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, Indiana University, Bloomington; Department of Computer Science, Indiana University, Bloomington; Department of Computer Science, Indiana University, Bloomington; Department of Computer Science, Indiana University, Bloomington", "aff_domain": "indiana.edu; ; ; ", "email": "indiana.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/schlegel17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Indiana University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.indiana.edu", "aff_unique_abbr": "IU", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Bloomington", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Adaptive Consensus ADMM for Distributed Optimization", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/632", "id": "632", "author_site": "Zheng Xu, Gavin Taylor, Hao Li, Mario Figueiredo, Xiaoming Yuan, Tom Goldstein", "author": "Zheng Xu; Gavin Taylor; Hao Li; M\u00e1rio A. T. Figueiredo; Xiaoming Yuan; Tom Goldstein", "abstract": "The alternating direction method of multipliers (ADMM) is commonly used for distributed model fitting problems, but its performance and reliability depend strongly on user-defined penalty parameters. We study distributed ADMM methods that boost performance by using different fine-tuned algorithm parameters on each worker node. We present a O(1/k) convergence rate for adaptive ADMM methods with node-specific parameters, and propose adaptive consensus ADMM (ACADMM), which automatically tunes parameters without user oversight.", "bibtex": "@InProceedings{pmlr-v70-xu17c,\n title = \t {Adaptive Consensus {ADMM} for Distributed Optimization},\n author = {Zheng Xu and Gavin Taylor and Hao Li and M{\\'a}rio A. T. 
Figueiredo and Xiaoming Yuan and Tom Goldstein},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3841--3850},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/xu17c/xu17c.pdf},\n url = \t {https://proceedings.mlr.press/v70/xu17c.html},\n abstract = \t {The alternating direction method of multipliers (ADMM) is commonly used for distributed model fitting problems, but its performance and reliability depend strongly on user-defined penalty parameters. We study distributed ADMM methods that boost performance by using different fine-tuned algorithm parameters on each worker node. We present a O(1/k) convergence rate for adaptive ADMM methods with node-specific parameters, and propose adaptive consensus ADMM (ACADMM), which automatically tunes parameters without user oversight.}\n}", "pdf": "http://proceedings.mlr.press/v70/xu17c/xu17c.pdf", "supp": "", "pdf_size": 475228, "gs_citation": 89, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18363715782931300628&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": ";;;;;", "aff_domain": ";;;;;", "email": ";;;;;", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/xu17c.html" }, { "title": "Adaptive Feature Selection: Computationally Efficient Online Sparse Linear Regression under RIP", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/760", "id": "760", "author_site": "Satyen Kale, Zohar Karnin, Tengyuan Liang, David Pal", "author": "Satyen Kale; Zohar Karnin; Tengyuan Liang; D\u00e1vid P\u00e1l", "abstract": "Online sparse linear regression is an online problem where an algorithm repeatedly chooses a subset of coordinates to observe in an adversarially chosen feature vector, makes a real-valued prediction, receives the true label, and incurs the squared loss. The goal is to design an online learning algorithm with sublinear regret to the best sparse linear predictor in hindsight. Without any assumptions, this problem is known to be computationally intractable. In this paper, we make the assumption that data matrix satisfies restricted isometry property, and show that this assumption leads to computationally efficient algorithms with sublinear regret for two variants of the problem. In the first variant, the true label is generated according to a sparse linear model with additive Gaussian noise. 
In the second, the true label is chosen adversarially.", "bibtex": "@InProceedings{pmlr-v70-kale17a,\n title = \t {Adaptive Feature Selection: Computationally Efficient Online Sparse Linear Regression under {RIP}},\n author = {Satyen Kale and Zohar Karnin and Tengyuan Liang and D{\\'a}vid P{\\'a}l},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1780--1788},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/kale17a/kale17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/kale17a.html},\n abstract = \t {Online sparse linear regression is an online problem where an algorithm repeatedly chooses a subset of coordinates to observe in an adversarially chosen feature vector, makes a real-valued prediction, receives the true label, and incurs the squared loss. The goal is to design an online learning algorithm with sublinear regret to the best sparse linear predictor in hindsight. Without any assumptions, this problem is known to be computationally intractable. In this paper, we make the assumption that data matrix satisfies restricted isometry property, and show that this assumption leads to computationally efficient algorithms with sublinear regret for two variants of the problem. In the first variant, the true label is generated according to a sparse linear model with additive Gaussian noise. In the second, the true label is chosen adversarially.}\n}", "pdf": "http://proceedings.mlr.press/v70/kale17a/kale17a.pdf", "supp": "", "pdf_size": 327316, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17365469933362776835&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Google Research, New York; Amazon, New York; University of Chicago, Booth School of Business, Chicago; Yahoo Research, New York", "aff_domain": "google.com;gmail.com;chicagobooth.edu;yahoo-inc.com", "email": "google.com;gmail.com;chicagobooth.edu;yahoo-inc.com", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/kale17a.html", "aff_unique_index": "0;1;2;3", "aff_unique_norm": "Google;Amazon;University of Chicago;Yahoo Research", "aff_unique_dep": "Google Research;Amazon;Booth School of Business;", "aff_unique_url": "https://research.google;https://www.amazon.com;https://www.chicagobooth.edu;https://research.yahoo.com", "aff_unique_abbr": "Google;Amazon;UChicago;Yahoo Res.", "aff_campus_unique_index": "0;0;1;0", "aff_campus_unique": "New York;Chicago", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Adaptive Multiple-Arm Identification", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/546", "id": "546", "author_site": "Jiecao Chen, Xi Chen, Qin Zhang, Yuan Zhou", "author": "Jiecao Chen; Xi Chen; Qin Zhang; Yuan Zhou", "abstract": "We study the problem of selecting K arms with the highest expected rewards in a stochastic n-armed bandit game. This problem has a wide range of applications, e.g., A/B testing, crowdsourcing, simulation optimization. Our goal is to develop a PAC algorithm, which, with probability at least $1-\\delta$, identifies a set of K arms with the aggregate regret at most $\\epsilon$. The notion of aggregate regret for multiple-arm identification was first introduced in Zhou et. al. 
(2014), which is defined as the difference of the averaged expected rewards between the selected set of arms and the best K arms. In contrast to Zhou et. al. (2014) that only provides instance-independent sample complexity, we introduce a new hardness parameter for characterizing the difficulty of any given instance. We further develop two algorithms and establish the corresponding sample complexity in terms of this hardness parameter. The derived sample complexity can be significantly smaller than state-of-the-art results for a large class of instances and matches the instance-independent lower bound up to a $\\log(\\epsilon^{-1})$ factor in the worst case. We also prove a lower bound result showing that the extra $\\log(\\epsilon^{-1})$ is necessary for instance-dependent algorithms using the introduced hardness parameter.", "bibtex": "@InProceedings{pmlr-v70-chen17b,\n title = \t {Adaptive Multiple-Arm Identification},\n author = {Jiecao Chen and Xi Chen and Qin Zhang and Yuan Zhou},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {722--730},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/chen17b/chen17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/chen17b.html},\n abstract = \t {We study the problem of selecting K arms with the highest expected rewards in a stochastic n-armed bandit game. This problem has a wide range of applications, e.g., A/B testing, crowdsourcing, simulation optimization. Our goal is to develop a PAC algorithm, which, with probability at least $1-\\delta$, identifies a set of K arms with the aggregate regret at most $\\epsilon$. The notion of aggregate regret for multiple-arm identification was first introduced in Zhou et. al. (2014), which is defined as the difference of the averaged expected rewards between the selected set of arms and the best K arms. In contrast to Zhou et. al. (2014) that only provides instance-independent sample complexity, we introduce a new hardness parameter for characterizing the difficulty of any given instance. We further develop two algorithms and establish the corresponding sample complexity in terms of this hardness parameter. The derived sample complexity can be significantly smaller than state-of-the-art results for a large class of instances and matches the instance-independent lower bound up to a $\\log(\\epsilon^{-1})$ factor in the worst case. 
We also prove a lower bound result showing that the extra $\\log(\\epsilon^{-1})$ is necessary for instance-dependent algorithms using the introduced hardness parameter.}\n}", "pdf": "http://proceedings.mlr.press/v70/chen17b/chen17b.pdf", "supp": "", "pdf_size": 865120, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2547066879144828024&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Computer Science Department, Indiana University, Bloomington, IN, USA+ Stern School of Business, New York University, New York, NY, USA; Stern School of Business, New York University, New York, NY, USA; Computer Science Department, Indiana University, Bloomington, IN, USA; Computer Science Department, Indiana University, Bloomington, IN, USA", "aff_domain": "umail.iu.edu;stern.nyu.edu;indiana.edu;indiana.edu", "email": "umail.iu.edu;stern.nyu.edu;indiana.edu;indiana.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/chen17b.html", "aff_unique_index": "0+1;1;0;0", "aff_unique_norm": "Indiana University;New York University", "aff_unique_dep": "Computer Science Department;Stern School of Business", "aff_unique_url": "https://www.indiana.edu;https://www.nyu.edu", "aff_unique_abbr": "IU;NYU", "aff_campus_unique_index": "0+1;1;0;0", "aff_campus_unique": "Bloomington;New York", "aff_country_unique_index": "0+0;0;0;0", "aff_country_unique": "United States" }, { "title": "Adaptive Neural Networks for Efficient Inference", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/807", "id": "807", "author_site": "Tolga Bolukbasi, Joseph Wang, Ofer Dekel, Venkatesh Saligrama", "author": "Tolga Bolukbasi; Joseph Wang; Ofer Dekel; Venkatesh Saligrama", "abstract": "We present an approach to adaptively utilize deep neural networks in order to reduce the evaluation time on new examples without loss of accuracy. Rather than attempting to redesign or approximate existing networks, we propose two schemes that adaptively utilize networks. We first pose an adaptive network evaluation scheme, where we learn a system to adaptively choose the components of a deep network to be evaluated for each example. By allowing examples correctly classified using early layers of the system to exit, we avoid the computational time associated with full evaluation of the network. We extend this to learn a network selection system that adaptively selects the network to be evaluated for each example. We show that computational time can be dramatically reduced by exploiting the fact that many examples can be correctly classified using relatively efficient networks and that complex, computationally costly networks are only necessary for a small fraction of examples. We pose a global objective for learning an adaptive early exit or network selection policy and solve it by reducing the policy learning problem to a layer-by-layer weighted binary classification problem. 
Empirically, these approaches yield dramatic reductions in computational cost, with up to a 2.8x speedup on state-of-the-art networks from the ImageNet image recognition challenge with minimal ($<1\\%$) loss of top5 accuracy.", "bibtex": "@InProceedings{pmlr-v70-bolukbasi17a,\n title = \t {Adaptive Neural Networks for Efficient Inference},\n author = {Tolga Bolukbasi and Joseph Wang and Ofer Dekel and Venkatesh Saligrama},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {527--536},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/bolukbasi17a/bolukbasi17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/bolukbasi17a.html},\n abstract = \t {We present an approach to adaptively utilize deep neural networks in order to reduce the evaluation time on new examples without loss of accuracy. Rather than attempting to redesign or approximate existing networks, we propose two schemes that adaptively utilize networks. We first pose an adaptive network evaluation scheme, where we learn a system to adaptively choose the components of a deep network to be evaluated for each example. By allowing examples correctly classified using early layers of the system to exit, we avoid the computational time associated with full evaluation of the network. We extend this to learn a network selection system that adaptively selects the network to be evaluated for each example. We show that computational time can be dramatically reduced by exploiting the fact that many examples can be correctly classified using relatively efficient networks and that complex, computationally costly networks are only necessary for a small fraction of examples. We pose a global objective for learning an adaptive early exit or network selection policy and solve it by reducing the policy learning problem to a layer-by-layer weighted binary classification problem. 
Empirically, these approaches yield dramatic reductions in computational cost, with up to a 2.8x speedup on state-of-the-art networks from the ImageNet image recognition challenge with minimal ($<1\\%$) loss of top5 accuracy.}\n}", "pdf": "http://proceedings.mlr.press/v70/bolukbasi17a/bolukbasi17a.pdf", "supp": "", "pdf_size": 614506, "gs_citation": 490, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12345580443155328111&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Boston University, Boston, MA, USA; Amazon, Cambridge, MA, USA; Microsoft Research, Redmond, WA, USA; Boston University, Boston, MA, USA", "aff_domain": "bu.edu; ; ; ", "email": "bu.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/bolukbasi17a.html", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "Boston University;Amazon;Microsoft", "aff_unique_dep": ";Amazon;Microsoft Research", "aff_unique_url": "https://www.bu.edu;https://www.amazon.com;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "BU;Amazon;MSR", "aff_campus_unique_index": "0;1;2;0", "aff_campus_unique": "Boston;Cambridge;Redmond", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Adaptive Sampling Probabilities for Non-Smooth Optimization", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/562", "id": "562", "author_site": "Hongseok Namkoong, Aman Sinha, Steven Yadlowsky, John Duchi", "author": "Hongseok Namkoong; Aman Sinha; Steve Yadlowsky; John C. Duchi", "abstract": "Standard forms of coordinate and stochastic gradient methods do not adapt to structure in data; their good behavior under random sampling is predicated on uniformity in data. When gradients in certain blocks of features (for coordinate descent) or examples (for SGD) are larger than others, there is a natural structure that can be exploited for quicker convergence. Yet adaptive variants often suffer nontrivial computational overhead. We present a framework that discovers and leverages such structural properties at a low computational cost. We employ a bandit optimization procedure that \u201clearns\u201d probabilities for sampling coordinates or examples in (non-smooth) optimization problems, allowing us to guarantee performance close to that of the optimal stationary sampling distribution. When such structures exist, our algorithms achieve tighter convergence guarantees than their non-adaptive counterparts, and we complement our analysis with experiments on several datasets.", "bibtex": "@InProceedings{pmlr-v70-namkoong17a,\n title = \t {Adaptive Sampling Probabilities for Non-Smooth Optimization},\n author = {Hongseok Namkoong and Aman Sinha and Steve Yadlowsky and John C. Duchi},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2574--2583},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/namkoong17a/namkoong17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/namkoong17a.html},\n abstract = \t {Standard forms of coordinate and stochastic gradient methods do not adapt to structure in data; their good behavior under random sampling is predicated on uniformity in data. 
When gradients in certain blocks of features (for coordinate descent) or examples (for SGD) are larger than others, there is a natural structure that can be exploited for quicker convergence. Yet adaptive variants often suffer nontrivial computational overhead. We present a framework that discovers and leverages such structural properties at a low computational cost. We employ a bandit optimization procedure that \u201clearns\u201d probabilities for sampling coordinates or examples in (non-smooth) optimization problems, allowing us to guarantee performance close to that of the optimal stationary sampling distribution. When such structures exist, our algorithms achieve tighter convergence guarantees than their non-adaptive counterparts, and we complement our analysis with experiments on several datasets.}\n}", "pdf": "http://proceedings.mlr.press/v70/namkoong17a/namkoong17a.pdf", "supp": "", "pdf_size": 1136464, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14973702434157131380&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Management Science & Engineering, Stanford University, USA+Electrical Engineering, Stanford University, USA+Statistics, Stanford University, USA; Electrical Engineering, Stanford University, USA+Statistics, Stanford University, USA; Electrical Engineering, Stanford University, USA+Statistics, Stanford University, USA; Management Science & Engineering, Stanford University, USA+Electrical Engineering, Stanford University, USA+Statistics, Stanford University, USA", "aff_domain": "stanford.edu;stanford.edu; ; ", "email": "stanford.edu;stanford.edu; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/namkoong17a.html", "aff_unique_index": "0+0+0;0+0;0+0;0+0+0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "Management Science & Engineering", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0+0+0;0+0;0+0;0+0+0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0+0+0;0+0;0+0;0+0+0", "aff_country_unique": "United States" }, { "title": "Adversarial Feature Matching for Text Generation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/773", "id": "773", "author_site": "Yizhe Zhang, Zhe Gan, Kai Fan, Zhi Chen, Ricardo Henao, Dinghan Shen, Lawrence Carin", "author": "Yizhe Zhang; Zhe Gan; Kai Fan; Zhi Chen; Ricardo Henao; Dinghan Shen; Lawrence Carin", "abstract": "The Generative Adversarial Network (GAN) has achieved great success in generating realistic (real-valued) synthetic data. However, convergence issues and difficulties dealing with discrete data hinder the applicability of GAN to text. We propose a framework for generating realistic text via adversarial training. We employ a long short-term memory network as generator, and a convolutional network as discriminator. Instead of using the standard objective of GAN, we propose matching the high-dimensional latent feature distributions of real and synthetic sentences, via a kernelized discrepancy metric. This eases adversarial training by alleviating the mode-collapsing problem. 
Our experiments show superior performance in quantitative evaluation, and demonstrate that our model can generate realistic-looking sentences.", "bibtex": "@InProceedings{pmlr-v70-zhang17b,\n title = \t {Adversarial Feature Matching for Text Generation},\n author = {Yizhe Zhang and Zhe Gan and Kai Fan and Zhi Chen and Ricardo Henao and Dinghan Shen and Lawrence Carin},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {4006--4015},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zhang17b/zhang17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/zhang17b.html},\n abstract = \t {The Generative Adversarial Network (GAN) has achieved great success in generating realistic (real-valued) synthetic data. However, convergence issues and difficulties dealing with discrete data hinder the applicability of GAN to text. We propose a framework for generating realistic text via adversarial training. We employ a long short-term memory network as generator, and a convolutional network as discriminator. Instead of using the standard objective of GAN, we propose matching the high-dimensional latent feature distributions of real and synthetic sentences, via a kernelized discrepancy metric. This eases adversarial training by alleviating the mode-collapsing problem. Our experiments show superior performance in quantitative evaluation, and demonstrate that our model can generate realistic-looking sentences.}\n}", "pdf": "http://proceedings.mlr.press/v70/zhang17b/zhang17b.pdf", "supp": "", "pdf_size": 800133, "gs_citation": 487, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11561684801033759674&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Duke University; Duke University; Duke University; Duke University; Duke University; Duke University; Duke University", "aff_domain": "duke.edu; ; ; ; ; ; ", "email": "duke.edu; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v70/zhang17b.html", "aff_unique_index": "0;0;0;0;0;0;0", "aff_unique_norm": "Duke University", "aff_unique_dep": "", "aff_unique_url": "https://www.duke.edu", "aff_unique_abbr": "Duke", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Adversarial Variational Bayes: Unifying Variational Autoencoders and Generative Adversarial Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/671", "id": "671", "author_site": "Lars Mescheder, Sebastian Nowozin, Andreas Geiger", "author": "Lars Mescheder; Sebastian Nowozin; Andreas Geiger", "abstract": "Variational Autoencoders (VAEs) are expressive latent variable models that can be used to learn complex probability distributions from training data. However, the quality of the resulting model crucially relies on the expressiveness of the inference model. We introduce Adversarial Variational Bayes (AVB), a technique for training Variational Autoencoders with arbitrarily expressive inference models. We achieve this by introducing an auxiliary discriminative network that allows to rephrase the maximum-likelihood-problem as a two-player game, hence establishing a principled connection between VAEs and Generative Adversarial Networks (GANs). 
We show that in the nonparametric limit our method yields an exact maximum-likelihood assignment for the parameters of the generative model, as well as the exact posterior distribution over the latent variables given an observation. Contrary to competing approaches which combine VAEs with GANs, our approach has a clear theoretical justification, retains most advantages of standard Variational Autoencoders and is easy to implement.", "bibtex": "@InProceedings{pmlr-v70-mescheder17a,\n title = \t {Adversarial Variational Bayes: Unifying Variational Autoencoders and Generative Adversarial Networks},\n author = {Lars Mescheder and Sebastian Nowozin and Andreas Geiger},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2391--2400},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/mescheder17a/mescheder17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/mescheder17a.html},\n abstract = \t {Variational Autoencoders (VAEs) are expressive latent variable models that can be used to learn complex probability distributions from training data. However, the quality of the resulting model crucially relies on the expressiveness of the inference model. We introduce Adversarial Variational Bayes (AVB), a technique for training Variational Autoencoders with arbitrarily expressive inference models. We achieve this by introducing an auxiliary discriminative network that allows to rephrase the maximum-likelihood-problem as a two-player game, hence establishing a principled connection between VAEs and Generative Adversarial Networks (GANs). We show that in the nonparametric limit our method yields an exact maximum-likelihood assignment for the parameters of the generative model, as well as the exact posterior distribution over the latent variables given an observation. 
Contrary to competing approaches which combine VAEs with GANs, our approach has a clear theoretical justification, retains most advantages of standard Variational Autoencoders and is easy to implement.}\n}", "pdf": "http://proceedings.mlr.press/v70/mescheder17a/mescheder17a.pdf", "supp": "", "pdf_size": 669726, "gs_citation": 679, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12656834929147362081&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Autonomous Vision Group, MPI T\u00fcbingen; Microsoft Research Cambridge; Computer Vision and Geometry Group, ETH Z\u00fcrich", "aff_domain": "tuebingen.mpg.de; ; ", "email": "tuebingen.mpg.de; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/mescheder17a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Max Planck Institute for Biological Cybernetics;Microsoft;ETH Zurich", "aff_unique_dep": "Autonomous Vision Group;Microsoft Research;Computer Vision and Geometry Group", "aff_unique_url": "https://www.mpi-bcb.de;https://www.microsoft.com/en-us/research/group/microsoft-research-cambridge;https://www.ethz.ch", "aff_unique_abbr": "MPI T\u00fcbingen;MSR Cambridge;ETHZ", "aff_campus_unique_index": "0;1", "aff_campus_unique": "T\u00fcbingen;Cambridge;", "aff_country_unique_index": "0;1;2", "aff_country_unique": "Germany;United Kingdom;Switzerland" }, { "title": "Algebraic Variety Models for High-Rank Matrix Completion", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/879", "id": "879", "author_site": "Greg Ongie, Laura Balzano, Rebecca Willett, Robert Nowak", "author": "Greg Ongie; Rebecca Willett; Robert D. Nowak; Laura Balzano", "abstract": "We consider a non-linear generalization of low-rank matrix completion to the case where the data belongs to an algebraic variety, i.e., each data point is a solution to a system of polynomial equations. In this case the original matrix is possibly high-rank, but it becomes low-rank after mapping each column to a higher dimensional space of monomial features. Algebraic varieties capture a range of well-studied linear models, including affine subspaces and their union, but also quadratic and higher degree curves and surfaces. We study the sampling requirements for a general variety model with a focus on the union of affine subspaces. We propose an efficient matrix completion algorithm that minimizes a convex or non-convex surrogate of the rank of the lifted matrix. Our algorithm uses the well-known \u201ckernel trick\u201d to avoid working directly with the high-dimensional lifted data matrix and scales efficiently with data size. We show the proposed algorithm is able to recover synthetically generated data up to the predicted sampling complexity bounds. The algorithm also outperforms standard techniques in experiments with real data.", "bibtex": "@InProceedings{pmlr-v70-ongie17a,\n title = \t {Algebraic Variety Models for High-Rank Matrix Completion},\n author = {Greg Ongie and Rebecca Willett and Robert D. 
Nowak and Laura Balzano},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2691--2700},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/ongie17a/ongie17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/ongie17a.html},\n abstract = \t {We consider a non-linear generalization of low-rank matrix completion to the case where the data belongs to an algebraic variety, i.e., each data point is a solution to a system of polynomial equations. In this case the original matrix is possibly high-rank, but it becomes low-rank after mapping each column to a higher dimensional space of monomial features. Algebraic varieties capture a range of well-studied linear models, including affine subspaces and their union, but also quadratic and higher degree curves and surfaces. We study the sampling requirements for a general variety model with a focus on the union of affine subspaces. We propose an efficient matrix completion algorithm that minimizes a convex or non-convex surrogate of the rank of the lifted matrix. Our algorithm uses the well-known \u201ckernel trick\u201d to avoid working directly with the high-dimensional lifted data matrix and scales efficiently with data size. We show the proposed algorithm is able to recover synthetically generated data up to the predicted sampling complexity bounds. The algorithm also outperforms standard techniques in experiments with real data.}\n}", "pdf": "http://proceedings.mlr.press/v70/ongie17a/ongie17a.pdf", "supp": "", "pdf_size": 1189545, "gs_citation": 73, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4997904276471500473&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 7, "aff": "Department of EECS, University of Michigan, Ann Arbor, Michigan, USA+Department of ECE, University of Wisconsin, Madison, Wisconsin, USA; Department of ECE, University of Wisconsin, Madison, Wisconsin, USA; Department of ECE, University of Wisconsin, Madison, Wisconsin, USA; Department of EECS, University of Michigan, Ann Arbor, Michigan, USA", "aff_domain": "umich.edu; ; ; ", "email": "umich.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/ongie17a.html", "aff_unique_index": "0+1;1;1;0", "aff_unique_norm": "University of Michigan;University of Wisconsin-Madison", "aff_unique_dep": "Department of EECS;Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.umich.edu;https://www.wisc.edu", "aff_unique_abbr": "UM;UW-Madison", "aff_campus_unique_index": "0+1;1;1;0", "aff_campus_unique": "Ann Arbor;Madison", "aff_country_unique_index": "0+0;0;0;0", "aff_country_unique": "United States" }, { "title": "Algorithmic Stability and Hypothesis Complexity", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/650", "id": "650", "author_site": "Tongliang Liu, G\u00e1bor Lugosi, Gergely Neu, Dacheng Tao", "author": "Tongliang Liu; G\u00e1bor Lugosi; Gergely Neu; Dacheng Tao", "abstract": "We introduce a notion of algorithmic stability of learning algorithms\u2014that we term", "bibtex": "@InProceedings{pmlr-v70-liu17c,\n title = \t {Algorithmic Stability and Hypothesis Complexity},\n author = {Tongliang Liu and G{\\'a}bor Lugosi and Gergely Neu and Dacheng Tao},\n booktitle = \t {Proceedings of the 34th International Conference 
on Machine Learning},\n pages = \t {2159--2167},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/liu17c/liu17c.pdf},\n url = \t {https://proceedings.mlr.press/v70/liu17c.html},\n abstract = \t {We introduce a notion of algorithmic stability of learning algorithms\u2014that we term", "pdf": "http://proceedings.mlr.press/v70/liu17c/liu17c.pdf", "supp": "", "pdf_size": 242003, "gs_citation": 106, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7506544744186019706&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "UBTech Sydney AI Institute, School of IT, FEIT, The University of Sydney, Australia+ICREA, Pg. Llu\u00eds Companys 23, 08010 Barcelona, Spain; Department of Economics and Business, Pompeu Fabra University, Barcelona, Spain+ICREA, Pg. Llu\u00eds Companys 23, 08010 Barcelona, Spain+Barcelona Graduate School of Economics; AI group, DTIC, Universitat Pompeu Fabra, Barcelona, Spain; UBTech Sydney AI Institute, School of IT, FEIT, The University of Sydney, Australia", "aff_domain": "gmail.com;upf.edu;gmail.com;sydney.edu.au", "email": "gmail.com;upf.edu;gmail.com;sydney.edu.au", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/liu17c.html", "aff_unique_index": "0+1;2+1+3;4;0", "aff_unique_norm": "University of Sydney;Instituci\u00f3 Catalana de Recerca i Estudis Avan\u00e7ats;Pompeu Fabra University;Barcelona Graduate School of Economics;Universitat Pompeu Fabra", "aff_unique_dep": "School of IT, FEIT;;Department of Economics and Business;;DTIC", "aff_unique_url": "https://www.sydney.edu.au;https://www.icrea.cat;https://www.upf.edu;https://www.barcelonagraduateschool.com;https://www.upf.edu", "aff_unique_abbr": "USYD;ICREA;UPF;BGSE;", "aff_campus_unique_index": "0+1;1+1;1;0", "aff_campus_unique": "Sydney;Barcelona;", "aff_country_unique_index": "0+1;1+1+1;1;0", "aff_country_unique": "Australia;Spain" }, { "title": "Algorithms for $\\ell_p$ Low-Rank Approximation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/710", "id": "710", "author_site": "Flavio Chierichetti, Sreenivas Gollapudi, Ravi Kumar, Silvio Lattanzi, Rina Panigrahy, David Woodruff", "author": "Flavio Chierichetti; Sreenivas Gollapudi; Ravi Kumar; Silvio Lattanzi; Rina Panigrahy; David P. Woodruff", "abstract": "We consider the problem of approximating a given matrix by a low-rank matrix so as to minimize the entrywise $\\ell_p$-approximation error, for any $p \\geq 1$; the case $p = 2$ is the classical SVD problem. We obtain the first provably good approximation algorithms for this robust version of low-rank approximation that work for every value of $p$. Our algorithms are simple, easy to implement, work well in practice, and illustrate interesting tradeoffs between the approximation quality, the running time, and the rank of the approximating matrix.", "bibtex": "@InProceedings{pmlr-v70-chierichetti17a,\n title = \t {Algorithms for $\\ell_p$ Low-Rank Approximation},\n author = {Flavio Chierichetti and Sreenivas Gollapudi and Ravi Kumar and Silvio Lattanzi and Rina Panigrahy and David P. 
Woodruff},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {806--814},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/chierichetti17a/chierichetti17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/chierichetti17a.html},\n abstract = \t {We consider the problem of approximating a given matrix by a low-rank matrix so as to minimize the entrywise $\\ell_p$-approximation error, for any $p \\geq 1$; the case $p = 2$ is the classical SVD problem. We obtain the first provably good approximation algorithms for this robust version of low-rank approximation that work for every value of $p$. Our algorithms are simple, easy to implement, work well in practice, and illustrate interesting tradeoffs between the approximation quality, the running time, and the rank of the approximating matrix.}\n}", "pdf": "http://proceedings.mlr.press/v70/chierichetti17a/chierichetti17a.pdf", "supp": "", "pdf_size": 2226924, "gs_citation": 67, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16979302124183379392&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "Sapienza University, Rome, Italy + Google; Google, Mountain View, CA; Google, Zurich, Switzerland; Google, Zurich, Switzerland; Google, Mountain View, CA; IBM Almaden, San Jose, CA", "aff_domain": "di.uniroma1.it;yahoo.com;gmail.com;google.com;gmail.com;us.ibm.com", "email": "di.uniroma1.it;yahoo.com;gmail.com;google.com;gmail.com;us.ibm.com", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/chierichetti17a.html", "aff_unique_index": "0+1;1;1;1;1;2", "aff_unique_norm": "Sapienza University;Google;IBM", "aff_unique_dep": ";Google;IBM Almaden", "aff_unique_url": "https://www.uniroma1.it;https://www.google.com;https://www.ibm.com/research/almaden", "aff_unique_abbr": "Sapienza;Google;IBM", "aff_campus_unique_index": "0+1;1;2;2;1;3", "aff_campus_unique": "Rome;Mountain View;Zurich;San Jose", "aff_country_unique_index": "0+1;1;2;2;1;1", "aff_country_unique": "Italy;United States;Switzerland" }, { "title": "An Adaptive Test of Independence with Analytic Kernel Embeddings", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/469", "id": "469", "author_site": "Wittawat Jitkrittum, Zoltan Szabo, Arthur Gretton", "author": "Wittawat Jitkrittum; Zolt\u00e1n Szab\u00f3; Arthur Gretton", "abstract": "A new computationally efficient dependence measure, and an adaptive statistical test of independence, are proposed. The dependence measure is the difference between analytic embeddings of the joint distribution and the product of the marginals, evaluated at a finite set of locations (features). These features are chosen so as to maximize a lower bound on the test power, resulting in a test that is data-efficient, and that runs in linear time (with respect to the sample size n). The optimized features can be interpreted as evidence to reject the null hypothesis, indicating regions in the joint domain where the joint distribution and the product of the marginals differ most. Consistency of the independence test is established, for an appropriate choice of features. 
In real-world benchmarks, independence tests using the optimized features perform comparably to the state-of-the-art quadratic-time HSIC test, and outperform competing O(n) and O(n log n) tests.", "bibtex": "@InProceedings{pmlr-v70-jitkrittum17a,\n title = \t {An Adaptive Test of Independence with Analytic Kernel Embeddings},\n author = {Wittawat Jitkrittum and Zolt{\\'a}n Szab{\\'o} and Arthur Gretton},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1742--1751},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/jitkrittum17a/jitkrittum17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/jitkrittum17a.html},\n abstract = \t {A new computationally efficient dependence measure, and an adaptive statistical test of independence, are proposed. The dependence measure is the difference between analytic embeddings of the joint distribution and the product of the marginals, evaluated at a finite set of locations (features). These features are chosen so as to maximize a lower bound on the test power, resulting in a test that is data-efficient, and that runs in linear time (with respect to the sample size n). The optimized features can be interpreted as evidence to reject the null hypothesis, indicating regions in the joint domain where the joint distribution and the product of the marginals differ most. Consistency of the independence test is established, for an appropriate choice of features. In real-world benchmarks, independence tests using the optimized features perform comparably to the state-of-the-art quadratic-time HSIC test, and outperform competing O(n) and O(n log n) tests.}\n}", "pdf": "http://proceedings.mlr.press/v70/jitkrittum17a/jitkrittum17a.pdf", "supp": "", "pdf_size": 818118, "gs_citation": 58, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=325708685412636617&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Gatsby Unit, University College London, UK; CMAP, \u00c9cole Polytechnique, France; Gatsby Unit, University College London, UK", "aff_domain": "gmail.com; ; ", "email": "gmail.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/jitkrittum17a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "University College London;Ecole Polytechnique", "aff_unique_dep": "Gatsby Unit;CMAP", "aff_unique_url": "https://www.ucl.ac.uk;https://www.ecp.fr", "aff_unique_abbr": "UCL;\u00c9cole Polytechnique", "aff_campus_unique_index": "0;0", "aff_campus_unique": "London;", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United Kingdom;France" }, { "title": "An Alternative Softmax Operator for Reinforcement Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/781", "id": "781", "author_site": "Kavosh Asadi, Michael L. Littman", "author": "Kavosh Asadi; Michael L. Littman", "abstract": "A softmax operator applied to a set of values acts somewhat like the maximization function and somewhat like an average. In sequential decision making, softmax is often used in settings where it is necessary to maximize utility but also to hedge against problems that arise from putting all of one\u2019s weight behind a single maximum utility decision. 
The Boltzmann softmax operator is the most commonly used softmax operator in this setting, but we show that this operator is prone to misbehavior. In this work, we study a differentiable softmax operator that, among other properties, is a non-expansion ensuring a convergent behavior in learning and planning. We introduce a variant of SARSA algorithm that, by utilizing the new operator, computes a Boltzmann policy with a state-dependent temperature parameter. We show that the algorithm is convergent and that it performs favorably in practice.", "bibtex": "@InProceedings{pmlr-v70-asadi17a,\n title = \t {An Alternative Softmax Operator for Reinforcement Learning},\n author = {Kavosh Asadi and Michael L. Littman},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {243--252},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/asadi17a/asadi17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/asadi17a.html},\n abstract = \t {A softmax operator applied to a set of values acts somewhat like the maximization function and somewhat like an average. In sequential decision making, softmax is often used in settings where it is necessary to maximize utility but also to hedge against problems that arise from putting all of one\u2019s weight behind a single maximum utility decision. The Boltzmann softmax operator is the most commonly used softmax operator in this setting, but we show that this operator is prone to misbehavior. In this work, we study a differentiable softmax operator that, among other properties, is a non-expansion ensuring a convergent behavior in learning and planning. We introduce a variant of SARSA algorithm that, by utilizing the new operator, computes a Boltzmann policy with a state-dependent temperature parameter. We show that the algorithm is convergent and that it performs favorably in practice.}\n}", "pdf": "http://proceedings.mlr.press/v70/asadi17a/asadi17a.pdf", "supp": "", "pdf_size": 1307907, "gs_citation": 267, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9246804970085546456&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Brown University, USA; Brown University, USA", "aff_domain": "brown.edu; ", "email": "brown.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/asadi17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Brown University", "aff_unique_dep": "", "aff_unique_url": "https://www.brown.edu", "aff_unique_abbr": "Brown", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "An Analytical Formula of Population Gradient for two-layered ReLU network and its Applications in Convergence and Critical Point Analysis", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/550", "id": "550", "author": "Yuandong Tian", "abstract": "In this paper, we explore theoretical properties of training a two-layered ReLU network $g(\\mathbf{x}; \\mathbf{w}) = \\sum_{j=1}^K \\sigma(\\mathbf{w}_j^\\top\\mathbf{x})$ with centered $d$-dimensional spherical Gaussian input $\\mathbf{x}$ ($\\sigma$=ReLU). 
We train our network with gradient descent on $\\mathbf{w}$ to mimic the output of a teacher network with the same architecture and fixed parameters $\\mathbf{w}^*$. We show that its population gradient has an analytical formula, leading to interesting theoretical analysis of critical points and convergence behaviors. First, we prove that critical points outside the hyperplane spanned by the teacher parameters (\u201cout-of-plane\u201c) are not isolated and form manifolds, and characterize in-plane critical-point-free regions for two-ReLU case. On the other hand, convergence to $\\mathbf{w}^*$ for one ReLU node is guaranteed with at least $(1-\\epsilon)/2$ probability, if weights are initialized randomly with standard deviation upper-bounded by $O(\\epsilon/\\sqrt{d})$, in accordance with empirical practice. For network with many ReLU nodes, we prove that an infinitesimal perturbation of weight initialization results in convergence towards $\\mathbf{w}^*$ (or its permutation), a phenomenon known as spontaneous symmetric-breaking (SSB) in physics. We assume no independence of ReLU activations. Simulation verifies our findings.", "bibtex": "@InProceedings{pmlr-v70-tian17a,\n title = \t {An Analytical Formula of Population Gradient for two-layered {R}e{LU} network and its Applications in Convergence and Critical Point Analysis},\n author = {Yuandong Tian},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3404--3413},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/tian17a/tian17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/tian17a.html},\n abstract = \t {In this paper, we explore theoretical properties of training a two-layered ReLU network $g(\\mathbf{x}; \\mathbf{w}) = \\sum_{j=1}^K \\sigma(\\mathbf{w}_j^\\top\\mathbf{x})$ with centered $d$-dimensional spherical Gaussian input $\\mathbf{x}$ ($\\sigma$=ReLU). We train our network with gradient descent on $\\mathbf{w}$ to mimic the output of a teacher network with the same architecture and fixed parameters $\\mathbf{w}^*$. We show that its population gradient has an analytical formula, leading to interesting theoretical analysis of critical points and convergence behaviors. First, we prove that critical points outside the hyperplane spanned by the teacher parameters (\u201cout-of-plane\u201c) are not isolated and form manifolds, and characterize in-plane critical-point-free regions for two-ReLU case. On the other hand, convergence to $\\mathbf{w}^*$ for one ReLU node is guaranteed with at least $(1-\\epsilon)/2$ probability, if weights are initialized randomly with standard deviation upper-bounded by $O(\\epsilon/\\sqrt{d})$, in accordance with empirical practice. For network with many ReLU nodes, we prove that an infinitesimal perturbation of weight initialization results in convergence towards $\\mathbf{w}^*$ (or its permutation), a phenomenon known as spontaneous symmetric-breaking (SSB) in physics. We assume no independence of ReLU activations. 
Simulation verifies our findings.}\n}", "pdf": "http://proceedings.mlr.press/v70/tian17a/tian17a.pdf", "supp": "", "pdf_size": 789645, "gs_citation": 247, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1350330932045234557&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Facebook AI Research", "aff_domain": "fb.com", "email": "fb.com", "github": "github.com/yuandong-tian/ICML17_ReLU", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v70/tian17a.html", "aff_unique_index": "0", "aff_unique_norm": "Meta", "aff_unique_dep": "Facebook AI Research", "aff_unique_url": "https://research.facebook.com", "aff_unique_abbr": "FAIR", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "title": "An Efficient, Sparsity-Preserving, Online Algorithm for Low-Rank Approximation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/833", "id": "833", "author_site": "David Anderson, Ming Gu", "author": "David Anderson; Ming Gu", "abstract": "Low-rank matrix approximation is a fundamental tool in data analysis for processing large datasets, reducing noise, and finding important signals. In this work, we present a novel truncated LU factorization called Spectrum-Revealing LU (SRLU) for effective low-rank matrix approximation, and develop a fast algorithm to compute an SRLU factorization. We provide both matrix and singular value approximation error bounds for the SRLU approximation computed by our algorithm. Our analysis suggests that SRLU is competitive with the best low-rank matrix approximation methods, deterministic or randomized, in both computational complexity and approximation quality. Numeric experiments illustrate that SRLU preserves sparsity, highlights important data features and variables, can be efficiently updated, and calculates data approximations nearly as accurately as the best possible. To the best of our knowledge this is the first practical variant of the LU factorization for effective and efficient low-rank matrix approximation.", "bibtex": "@InProceedings{pmlr-v70-anderson17a,\n title = \t {An Efficient, Sparsity-Preserving, Online Algorithm for Low-Rank Approximation},\n author = {David Anderson and Ming Gu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {156--165},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/anderson17a/anderson17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/anderson17a.html},\n abstract = \t {Low-rank matrix approximation is a fundamental tool in data analysis for processing large datasets, reducing noise, and finding important signals. In this work, we present a novel truncated LU factorization called Spectrum-Revealing LU (SRLU) for effective low-rank matrix approximation, and develop a fast algorithm to compute an SRLU factorization. We provide both matrix and singular value approximation error bounds for the SRLU approximation computed by our algorithm. Our analysis suggests that SRLU is competitive with the best low-rank matrix approximation methods, deterministic or randomized, in both computational complexity and approximation quality. 
Numeric experiments illustrate that SRLU preserves sparsity, highlights important data features and variables, can be efficiently updated, and calculates data approximations nearly as accurately as the best possible. To the best of our knowledge this is the first practical variant of the LU factorization for effective and efficient low-rank matrix approximation.}\n}", "pdf": "http://proceedings.mlr.press/v70/anderson17a/anderson17a.pdf", "supp": "", "pdf_size": 1520555, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17661185519283239642&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "University of California, Berkeley; University of California, Berkeley", "aff_domain": "berkeley.edu;berkeley.edu", "email": "berkeley.edu;berkeley.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/anderson17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "An Infinite Hidden Markov Model With Similarity-Biased Transitions", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/649", "id": "649", "author_site": "Colin Dawson, Chaofan Huang, Clayton T. Morrison", "author": "Colin Reimer Dawson; Chaofan Huang; Clayton T. Morrison", "abstract": "We describe a generalization of the Hierarchical Dirichlet Process Hidden Markov Model (HDP-HMM) which is able to encode prior information that state transitions are more likely between \u201cnearby\u201d states. This is accomplished by defining a similarity function on the state space and scaling transition probabilities by pairwise similarities, thereby inducing correlations among the transition distributions. We present an augmented data representation of the model as a Markov Jump Process in which: (1) some jump attempts fail, and (2) the probability of success is proportional to the similarity between the source and destination states. This augmentation restores conditional conjugacy and admits a simple Gibbs sampler. We evaluate the model and inference method on a speaker diarization task and a \u201charmonic parsing\u201d task using four-part chorale data, as well as on several synthetic datasets, achieving favorable comparisons to existing models.", "bibtex": "@InProceedings{pmlr-v70-dawson17a,\n title = \t {An Infinite Hidden {M}arkov Model With Similarity-Biased Transitions},\n author = {Colin Reimer Dawson and Chaofan Huang and Clayton T. Morrison},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {942--950},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/dawson17a/dawson17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/dawson17a.html},\n abstract = \t {We describe a generalization of the Hierarchical Dirichlet Process Hidden Markov Model (HDP-HMM) which is able to encode prior information that state transitions are more likely between \u201cnearby\u201d states. 
This is accomplished by defining a similarity function on the state space and scaling transition probabilities by pairwise similarities, thereby inducing correlations among the transition distributions. We present an augmented data representation of the model as a Markov Jump Process in which: (1) some jump attempts fail, and (2) the probability of success is proportional to the similarity between the source and destination states. This augmentation restores conditional conjugacy and admits a simple Gibbs sampler. We evaluate the model and inference method on a speaker diarization task and a \u201charmonic parsing\u201d task using four-part chorale data, as well as on several synthetic datasets, achieving favorable comparisons to existing models.}\n}", "pdf": "http://proceedings.mlr.press/v70/dawson17a/dawson17a.pdf", "supp": "", "pdf_size": 730850, "gs_citation": 2, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11012384618866485632&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Oberlin College, Oberlin, OH, USA; Oberlin College, Oberlin, OH, USA; The University of Arizona, Tucson, AZ, USA", "aff_domain": "oberlin.edu; ; ", "email": "oberlin.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/dawson17a.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "Oberlin College;University of Arizona", "aff_unique_dep": ";", "aff_unique_url": "https://www.oberlin.edu;https://www.arizona.edu", "aff_unique_abbr": "Oberlin;UA", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Oberlin;Tucson", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Analogical Inference for Multi-relational Embeddings", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/620", "id": "620", "author_site": "Hanxiao Liu, Yuexin Wu, Yiming Yang", "author": "Hanxiao Liu; Yuexin Wu; Yiming Yang", "abstract": "Large-scale multi-relational embedding refers to the task of learning the latent representations for entities and relations in large knowledge graphs. An effective and scalable solution for this problem is crucial for the true success of knowledge-based inference in a broad range of applications. This paper proposes a novel framework for optimizing the latent representations with respect to the", "bibtex": "@InProceedings{pmlr-v70-liu17d,\n title = \t {Analogical Inference for Multi-relational Embeddings},\n author = {Hanxiao Liu and Yuexin Wu and Yiming Yang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2168--2178},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/liu17d/liu17d.pdf},\n url = \t {https://proceedings.mlr.press/v70/liu17d.html},\n abstract = \t {Large-scale multi-relational embedding refers to the task of learning the latent representations for entities and relations in large knowledge graphs. An effective and scalable solution for this problem is crucial for the true success of knowledge-based inference in a broad range of applications. 
This paper proposes a novel framework for optimizing the latent representations with respect to the", "pdf": "http://proceedings.mlr.press/v70/liu17d/liu17d.pdf", "supp": "", "pdf_size": 285610, "gs_citation": 514, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4290007913375024637&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Carnegie Mellon University; Carnegie Mellon University; Carnegie Mellon University", "aff_domain": "cs.cmu.edu; ; ", "email": "cs.cmu.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/liu17d.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Analysis and Optimization of Graph Decompositions by Lifted Multicuts", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/476", "id": "476", "author_site": "Andrea Hornakova, Jan-Hendrik Lange, Bjoern Andres", "author": "Andrea Hor\u0148\u00e1kov\u00e1; Jan-Hendrik Lange; Bjoern Andres", "abstract": "We study the set of all decompositions (clusterings) of a graph through its characterization as a set of lifted multicuts. This leads us to practically relevant insights related to the definition of classes of decompositions by must-join and must-cut constraints and related to the comparison of clusterings by metrics. To find optimal decompositions defined by minimum cost lifted multicuts, we establish some properties of some facets of lifted multicut polytopes, define efficient separation procedures and apply these in a branch-and-cut algorithm.", "bibtex": "@InProceedings{pmlr-v70-hornakova17a,\n title = \t {Analysis and Optimization of Graph Decompositions by Lifted Multicuts},\n author = {Andrea Hor{\\v{n}}{\\'a}kov{\\'a} and Jan-Hendrik Lange and Bjoern Andres},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1539--1548},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/hornakova17a/hornakova17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/hornakova17a.html},\n abstract = \t {We study the set of all decompositions (clusterings) of a graph through its characterization as a set of lifted multicuts. This leads us to practically relevant insights related to the definition of classes of decompositions by must-join and must-cut constraints and related to the comparison of clusterings by metrics. 
To find optimal decompositions defined by minimum cost lifted multicuts, we establish some properties of some facets of lifted multicut polytopes, define efficient separation procedures and apply these in a branch-and-cut algorithm.}\n}", "pdf": "http://proceedings.mlr.press/v70/hornakova17a/hornakova17a.pdf", "supp": "", "pdf_size": 416725, "gs_citation": 41, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8545392205198910737&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Max Planck Institute for Informatics, Saarbr\u00fccken, Germany; Max Planck Institute for Informatics, Saarbr\u00fccken, Germany; Max Planck Institute for Informatics, Saarbr\u00fccken, Germany", "aff_domain": "mpi-inf.mpg.de; ;mpi-inf.mpg.de", "email": "mpi-inf.mpg.de; ;mpi-inf.mpg.de", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/hornakova17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Max Planck Institute for Informatics", "aff_unique_dep": "", "aff_unique_url": "https://mpi-inf.mpg.de", "aff_unique_abbr": "MPII", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Saarbr\u00fccken", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Germany" }, { "title": "Analytical Guarantees on Numerical Precision of Deep Neural Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/743", "id": "743", "author_site": "Charbel Sakr, Yongjune Kim, Naresh Shanbhag", "author": "Charbel Sakr; Yongjune Kim; Naresh Shanbhag", "abstract": "The acclaimed successes of neural networks often overshadow their tremendous complexity. We focus on numerical precision \u2013 a key parameter defining the complexity of neural networks. First, we present theoretical bounds on the accuracy in presence of limited precision. Interestingly, these bounds can be computed via the back-propagation algorithm. Hence, by combining our theoretical analysis and the back-propagation algorithm, we are able to readily determine the minimum precision needed to preserve accuracy without having to resort to time-consuming fixed-point simulations. We provide numerical evidence showing how our approach allows us to maintain high accuracy but with lower complexity than state-of-the-art binary networks.", "bibtex": "@InProceedings{pmlr-v70-sakr17a,\n title = \t {Analytical Guarantees on Numerical Precision of Deep Neural Networks},\n author = {Charbel Sakr and Yongjune Kim and Naresh Shanbhag},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3007--3016},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/sakr17a/sakr17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/sakr17a.html},\n abstract = \t {The acclaimed successes of neural networks often overshadow their tremendous complexity. We focus on numerical precision \u2013 a key parameter defining the complexity of neural networks. First, we present theoretical bounds on the accuracy in presence of limited precision. Interestingly, these bounds can be computed via the back-propagation algorithm. Hence, by combining our theoretical analysis and the back-propagation algorithm, we are able to readily determine the minimum precision needed to preserve accuracy without having to resort to time-consuming fixed-point simulations. 
We provide numerical evidence showing how our approach allows us to maintain high accuracy but with lower complexity than state-of-the-art binary networks.}\n}", "pdf": "http://proceedings.mlr.press/v70/sakr17a/sakr17a.pdf", "supp": "", "pdf_size": 951277, "gs_citation": 123, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7794082213595276633&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign", "aff_domain": "illinois.edu;illinois.edu;illinois.edu", "email": "illinois.edu;illinois.edu;illinois.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/sakr17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Illinois Urbana-Champaign", "aff_unique_dep": "", "aff_unique_url": "https://illinois.edu", "aff_unique_abbr": "UIUC", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Urbana-Champaign", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Approximate Newton Methods and Their Local Convergence", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/715", "id": "715", "author_site": "Haishan Ye, Luo Luo, Zhihua Zhang", "author": "Haishan Ye; Luo Luo; Zhihua Zhang", "abstract": "Many machine learning models are reformulated as optimization problems. Thus, it is important to solve a large-scale optimization problem in big data applications. Recently, subsampled Newton methods have emerged to attract much attention for optimization due to their efficiency at each iteration, rectified a weakness in the ordinary Newton method of suffering a high cost in each iteration while commanding a high convergence rate. Other efficient stochastic second order methods are also proposed. However, the convergence properties of these methods are still not well understood. There are also several important gaps between the current convergence theory and performance in real applications. In this paper, we aim to fill these gaps. We propose a unifying framework to analyze local convergence properties of second order methods. Based on this framework, our theoretical analysis matches the performance in real applications.", "bibtex": "@InProceedings{pmlr-v70-ye17a,\n title = \t {Approximate {N}ewton Methods and Their Local Convergence},\n author = {Haishan Ye and Luo Luo and Zhihua Zhang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3931--3939},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/ye17a/ye17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/ye17a.html},\n abstract = \t {Many machine learning models are reformulated as optimization problems. Thus, it is important to solve a large-scale optimization problem in big data applications. Recently, subsampled Newton methods have emerged to attract much attention for optimization due to their efficiency at each iteration, rectified a weakness in the ordinary Newton method of suffering a high cost in each iteration while commanding a high convergence rate. Other efficient stochastic second order methods are also proposed. However, the convergence properties of these methods are still not well understood. 
There are also several important gaps between the current convergence theory and performance in real applications. In this paper, we aim to fill these gaps. We propose a unifying framework to analyze local convergence properties of second order methods. Based on this framework, our theoretical analysis matches the performance in real applications.}\n}", "pdf": "http://proceedings.mlr.press/v70/ye17a/ye17a.pdf", "supp": "", "pdf_size": 669318, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7620100132333831590&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Shanghai Jiao Tong University, Shanghai, China; Shanghai Jiao Tong University, Shanghai, China; Peking University & Beijing Institute of Big Data Research, Beijing, China", "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;gmail.com", "email": "sjtu.edu.cn;sjtu.edu.cn;gmail.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/ye17a.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "Shanghai Jiao Tong University;Peking University", "aff_unique_dep": ";", "aff_unique_url": "https://www.sjtu.edu.cn;http://www.pku.edu.cn", "aff_unique_abbr": "SJTU;Peking U", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Shanghai;Beijing", "aff_country_unique_index": "0;0;0", "aff_country_unique": "China" }, { "title": "Approximate Steepest Coordinate Descent", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/723", "id": "723", "author_site": "Sebastian Stich, Anant Raj, Martin Jaggi", "author": "Sebastian U. Stich; Anant Raj; Martin Jaggi", "abstract": "We propose a new selection rule for the coordinate selection in coordinate descent methods for huge-scale optimization. The efficiency of this novel scheme is provably better than the efficiency of uniformly random selection, and can reach the efficiency of steepest coordinate descent (SCD), enabling an acceleration of a factor of up to $n$, the number of coordinates. In many practical applications, our scheme can be implemented at no extra cost and computational efficiency very close to the faster uniform selection. Numerical experiments with Lasso and Ridge regression show promising improvements, in line with our theoretical guarantees.", "bibtex": "@InProceedings{pmlr-v70-stich17a,\n title = \t {Approximate Steepest Coordinate Descent},\n author = {Sebastian U. Stich and Anant Raj and Martin Jaggi},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3251--3259},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/stich17a/stich17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/stich17a.html},\n abstract = \t {We propose a new selection rule for the coordinate selection in coordinate descent methods for huge-scale optimization. The efficiency of this novel scheme is provably better than the efficiency of uniformly random selection, and can reach the efficiency of steepest coordinate descent (SCD), enabling an acceleration of a factor of up to $n$, the number of coordinates. In many practical applications, our scheme can be implemented at no extra cost and computational efficiency very close to the faster uniform selection. 
Numerical experiments with Lasso and Ridge regression show promising improvements, in line with our theoretical guarantees.}\n}", "pdf": "http://proceedings.mlr.press/v70/stich17a/stich17a.pdf", "supp": "", "pdf_size": 1486487, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8522330098608389483&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "EPFL; Max Planck Institute for Intelligent Systems; EPFL", "aff_domain": "epfl.ch; ; ", "email": "epfl.ch; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/stich17a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "EPFL;Max Planck Institute for Intelligent Systems", "aff_unique_dep": ";Intelligent Systems", "aff_unique_url": "https://www.epfl.ch;https://www.mpi-is.mpg.de", "aff_unique_abbr": "EPFL;MPI-IS", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0", "aff_country_unique": "Switzerland;Germany" }, { "title": "Asymmetric Tri-training for Unsupervised Domain Adaptation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/616", "id": "616", "author_site": "Kuniaki Saito, Yoshitaka Ushiku, Tatsuya Harada", "author": "Kuniaki Saito; Yoshitaka Ushiku; Tatsuya Harada", "abstract": "It is important to apply models trained on a large number of labeled samples to different domains because collecting many labeled samples in various domains is expensive. To learn discriminative representations for the target domain, we assume that artificially labeling the target samples can result in a good representation. Tri-training leverages three classifiers equally to provide pseudo-labels to unlabeled samples; however, the method does not assume labeling samples generated from a different domain. In this paper, we propose the use of an", "bibtex": "@InProceedings{pmlr-v70-saito17a,\n title = \t {Asymmetric Tri-training for Unsupervised Domain Adaptation},\n author = {Kuniaki Saito and Yoshitaka Ushiku and Tatsuya Harada},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2988--2997},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/saito17a/saito17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/saito17a.html},\n abstract = \t {It is important to apply models trained on a large number of labeled samples to different domains because collecting many labeled samples in various domains is expensive. To learn discriminative representations for the target domain, we assume that artificially labeling the target samples can result in a good representation. Tri-training leverages three classifiers equally to provide pseudo-labels to unlabeled samples; however, the method does not assume labeling samples generated from a different domain. 
In this paper, we propose the use of an", "pdf": "http://proceedings.mlr.press/v70/saito17a/saito17a.pdf", "supp": "", "pdf_size": 943019, "gs_citation": 765, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4676245797797796956&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "The University of Tokyo, Tokyo, Japan+RIKEN, Japan; The University of Tokyo, Tokyo, Japan+RIKEN, Japan; The University of Tokyo, Tokyo, Japan+RIKEN, Japan", "aff_domain": "mi.t.u-tokyo.ac.jp;mi.t.u-tokyo.ac.jp;mi.t.u-tokyo.ac.jp", "email": "mi.t.u-tokyo.ac.jp;mi.t.u-tokyo.ac.jp;mi.t.u-tokyo.ac.jp", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/saito17a.html", "aff_unique_index": "0+1;0+1;0+1", "aff_unique_norm": "University of Tokyo;RIKEN", "aff_unique_dep": ";", "aff_unique_url": "https://www.u-tokyo.ac.jp;https://www.riken.jp", "aff_unique_abbr": "UTokyo;RIKEN", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Tokyo;", "aff_country_unique_index": "0+0;0+0;0+0", "aff_country_unique": "Japan" }, { "title": "Asynchronous Distributed Variational Gaussian Process for Regression", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/629", "id": "629", "author_site": "Hao Peng, Shandian Zhe, Xiao Zhang, Yuan Qi", "author": "Hao Peng; Shandian Zhe; Xiao Zhang; Yuan Qi", "abstract": "Gaussian processes (GPs) are powerful non-parametric function estimators. However, their applications are largely limited by the expensive computational cost of the inference procedures. Existing stochastic or distributed synchronous variational inferences, although have alleviated this issue by scaling up GPs to millions of samples, are still far from satisfactory for real-world large applications, where the data sizes are often orders of magnitudes larger, say, billions. To solve this problem, we propose ADVGP, the first Asynchronous Distributed Variational Gaussian Process inference for regression, on the recent large-scale machine learning platform, PARAMETER SERVER. ADVGP uses a novel, flexible variational framework based on a weight space augmentation, and implements the highly efficient, asynchronous proximal gradient optimization. While maintaining comparable or better predictive performance, ADVGP greatly improves upon the efficiency of the existing variational methods. With ADVGP, we effortlessly scale up GP regression to a real-world application with billions of samples and demonstrate an excellent, superior prediction accuracy to the popular linear models.", "bibtex": "@InProceedings{pmlr-v70-peng17a,\n title = \t {Asynchronous Distributed Variational {G}aussian Process for Regression},\n author = {Hao Peng and Shandian Zhe and Xiao Zhang and Yuan Qi},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2788--2797},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/peng17a/peng17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/peng17a.html},\n abstract = \t {Gaussian processes (GPs) are powerful non-parametric function estimators. However, their applications are largely limited by the expensive computational cost of the inference procedures. 
Existing stochastic or distributed synchronous variational inferences, although have alleviated this issue by scaling up GPs to millions of samples, are still far from satisfactory for real-world large applications, where the data sizes are often orders of magnitudes larger, say, billions. To solve this problem, we propose ADVGP, the first Asynchronous Distributed Variational Gaussian Process inference for regression, on the recent large-scale machine learning platform, PARAMETER SERVER. ADVGP uses a novel, flexible variational framework based on a weight space augmentation, and implements the highly efficient, asynchronous proximal gradient optimization. While maintaining comparable or better predictive performance, ADVGP greatly improves upon the efficiency of the existing variational methods. With ADVGP, we effortlessly scale up GP regression to a real-world application with billions of samples and demonstrate an excellent, superior prediction accuracy to the popular linear models.}\n}", "pdf": "http://proceedings.mlr.press/v70/peng17a/peng17a.pdf", "supp": "", "pdf_size": 653094, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=927191844900018391&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Purdue University, West Lafayette, IN, USA+Ant Financial Service Group; Purdue University, West Lafayette, IN, USA; Purdue University, West Lafayette, IN, USA; Ant Financial Service Group", "aff_domain": "alumni.purdue.edu; ; ; ", "email": "alumni.purdue.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/peng17a.html", "aff_unique_index": "0+1;0;0;1", "aff_unique_norm": "Purdue University;Ant Financial", "aff_unique_dep": ";", "aff_unique_url": "https://www.purdue.edu;https://www.antgroup.com", "aff_unique_abbr": "Purdue;Ant Financial", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "West Lafayette;", "aff_country_unique_index": "0+1;0;0;1", "aff_country_unique": "United States;China" }, { "title": "Asynchronous Stochastic Gradient Descent with Delay Compensation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/564", "id": "564", "author_site": "Shuxin Zheng, Qi Meng, Taifeng Wang, Wei Chen, Nenghai Yu, Zhiming Ma, Tie-Yan Liu", "author": "Shuxin Zheng; Qi Meng; Taifeng Wang; Wei Chen; Nenghai Yu; Zhi-Ming Ma; Tie-Yan Liu", "abstract": "With the fast development of deep learning, it has become common to learn big neural networks using massive training data. Asynchronous Stochastic Gradient Descent (ASGD) is widely adopted to fulfill this task for its efficiency, which is, however, known to suffer from the problem of delayed gradients. That is, when a local worker adds its gradient to the global model, the global model may have been updated by other workers and this gradient becomes \u201cdelayed\u201d. We propose a novel technology to compensate this delay, so as to make the optimization behavior of ASGD closer to that of sequential SGD. This is achieved by leveraging Taylor expansion of the gradient function and efficient approximators to the Hessian matrix of the loss function. We call the new algorithm Delay Compensated ASGD (DC-ASGD). 
We evaluated the proposed algorithm on CIFAR-10 and ImageNet datasets, and the experimental results demonstrate that DC-ASGD outperforms both synchronous SGD and asynchronous SGD, and nearly approaches the performance of sequential SGD.", "bibtex": "@InProceedings{pmlr-v70-zheng17b,\n title = \t {Asynchronous Stochastic Gradient Descent with Delay Compensation},\n author = {Shuxin Zheng and Qi Meng and Taifeng Wang and Wei Chen and Nenghai Yu and Zhi-Ming Ma and Tie-Yan Liu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {4120--4129},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zheng17b/zheng17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/zheng17b.html},\n abstract = \t {With the fast development of deep learning, it has become common to learn big neural networks using massive training data. Asynchronous Stochastic Gradient Descent (ASGD) is widely adopted to fulfill this task for its efficiency, which is, however, known to suffer from the problem of delayed gradients. That is, when a local worker adds its gradient to the global model, the global model may have been updated by other workers and this gradient becomes \u201cdelayed\u201d. We propose a novel technology to compensate this delay, so as to make the optimization behavior of ASGD closer to that of sequential SGD. This is achieved by leveraging Taylor expansion of the gradient function and efficient approximators to the Hessian matrix of the loss function. We call the new algorithm Delay Compensated ASGD (DC-ASGD). We evaluated the proposed algorithm on CIFAR-10 and ImageNet datasets, and the experimental results demonstrate that DC-ASGD outperforms both synchronous SGD and asynchronous SGD, and nearly approaches the performance of sequential SGD.}\n}", "pdf": "http://proceedings.mlr.press/v70/zheng17b/zheng17b.pdf", "supp": "", "pdf_size": 380172, "gs_citation": 359, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3806654470347953521&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "University of Science and Technology of China; School of Mathematical Sciences, Peking University; Microsoft Research; Academy of Mathematics and Systems Science, Chinese Academy of Sciences; School of Mathematical Sciences, Peking University; Academy of Mathematics and Systems Science, Chinese Academy of Sciences; Microsoft Research", "aff_domain": "ustc.edu.cn;pku.edu.cn;microsoft.com;microsoft.com;pku.edu.cn;amss.ac.cn;microsoft.com", "email": "ustc.edu.cn;pku.edu.cn;microsoft.com;microsoft.com;pku.edu.cn;amss.ac.cn;microsoft.com", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v70/zheng17b.html", "aff_unique_index": "0;1;2;3;1;3;2", "aff_unique_norm": "University of Science and Technology of China;Peking University;Microsoft;Chinese Academy of Sciences", "aff_unique_dep": ";School of Mathematical Sciences;Microsoft Research;Academy of Mathematics and Systems Science", "aff_unique_url": "http://www.ustc.edu.cn;http://www.pku.edu.cn;https://www.microsoft.com/en-us/research;http://www.amss.cas.cn", "aff_unique_abbr": "USTC;PKU;MSR;AMSS", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Beijing", "aff_country_unique_index": "0;0;1;0;0;0;1", "aff_country_unique": "China;United States" }, { "title": "Attentive Recurrent 
Comparators", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/730", "id": "730", "author_site": "Pranav Shyam, Shubham Gupta, Ambedkar Dukkipati", "author": "Pranav Shyam; Shubham Gupta; Ambedkar Dukkipati", "abstract": "Rapid learning requires flexible representations to quickly adopt to new evidence. We develop a novel class of models called Attentive Recurrent Comparators (ARCs) that form representations of objects by cycling through them and making observations. Using the representations extracted by ARCs, we develop a way of approximating a", "bibtex": "@InProceedings{pmlr-v70-shyam17a,\n title = \t {Attentive Recurrent Comparators},\n author = {Pranav Shyam and Shubham Gupta and Ambedkar Dukkipati},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3173--3181},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/shyam17a/shyam17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/shyam17a.html},\n abstract = \t {Rapid learning requires flexible representations to quickly adopt to new evidence. We develop a novel class of models called Attentive Recurrent Comparators (ARCs) that form representations of objects by cycling through them and making observations. Using the representations extracted by ARCs, we develop a way of approximating a", "pdf": "http://proceedings.mlr.press/v70/shyam17a/shyam17a.pdf", "supp": "", "pdf_size": 556858, "gs_citation": 161, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16094580685287102974&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science and Engineering, Rashtreeya Vidyalaya College of Engineering, Bengaluru, India+Department of Computer Science and Automation, Indian Institute of Science, Bengaluru, India; Department of Computer Science and Automation, Indian Institute of Science, Bengaluru, India; Department of Computer Science and Automation, Indian Institute of Science, Bengaluru, India", "aff_domain": "gmail.com; ; ", "email": "gmail.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/shyam17a.html", "aff_unique_index": "0+1;1;1", "aff_unique_norm": "Rashtreeya Vidyalaya College of Engineering;Indian Institute of Science", "aff_unique_dep": "Department of Computer Science and Engineering;Department of Computer Science and Automation", "aff_unique_url": ";https://www.iisc.ac.in", "aff_unique_abbr": ";IISc", "aff_campus_unique_index": "0+0;0;0", "aff_campus_unique": "Bengaluru", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "India" }, { "title": "Automated Curriculum Learning for Neural Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/701", "id": "701", "author_site": "Alex Graves, Marc Bellemare, Jacob Menick, Remi Munos, Koray Kavukcuoglu", "author": "Alex Graves; Marc G. Bellemare; Jacob Menick; R\u00e9mi Munos; Koray Kavukcuoglu", "abstract": "We introduce a method for automatically selecting the path, or syllabus, that a neural network follows through a curriculum so as to maximise learning efficiency. A measure of the amount that the network learns from each data sample is provided as a reward signal to a nonstationary multi-armed bandit algorithm, which then determines a stochastic syllabus. 
We consider a range of signals derived from two distinct indicators of learning progress: rate of increase in prediction accuracy, and rate of increase in network complexity. Experimental results for LSTM networks on three curricula demonstrate that our approach can significantly accelerate learning, in some cases halving the time required to attain a satisfactory performance level.", "bibtex": "@InProceedings{pmlr-v70-graves17a,\n title = \t {Automated Curriculum Learning for Neural Networks},\n author = {Alex Graves and Marc G. Bellemare and Jacob Menick and R{\\'e}mi Munos and Koray Kavukcuoglu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1311--1320},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/graves17a/graves17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/graves17a.html},\n abstract = \t {We introduce a method for automatically selecting the path, or syllabus, that a neural network follows through a curriculum so as to maximise learning efficiency. A measure of the amount that the network learns from each data sample is provided as a reward signal to a nonstationary multi-armed bandit algorithm, which then determines a stochastic syllabus. We consider a range of signals derived from two distinct indicators of learning progress: rate of increase in prediction accuracy, and rate of increase in network complexity. Experimental results for LSTM networks on three curricula demonstrate that our approach can significantly accelerate learning, in some cases halving the time required to attain a satisfactory performance level.}\n}", "pdf": "http://proceedings.mlr.press/v70/graves17a/graves17a.pdf", "supp": "", "pdf_size": 2978421, "gs_citation": 671, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15665684335628553686&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK", "aff_domain": "google.com; ; ; ; ", "email": "google.com; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/graves17a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Automatic Discovery of the Statistical Types of Variables in a Dataset", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/541", "id": "541", "author_site": "Isabel Valera, Zoubin Ghahramani", "author": "Isabel Valera; Zoubin Ghahramani", "abstract": "A common practice in statistics and machine learning is to assume that the statistical data types (e.g., ordinal, categorical or real-valued) of variables, and usually also the likelihood model, is known. However, as the availability of real-world data increases, this assumption becomes too restrictive. Data are often heterogeneous, complex, and improperly or incompletely documented. 
Surprisingly, despite their practical importance, there is still a lack of tools to automatically discover the statistical types of, as well as appropriate likelihood (noise) models for, the variables in a dataset. In this paper, we fill this gap by proposing a Bayesian method, which accurately discovers the statistical data types in both synthetic and real data.", "bibtex": "@InProceedings{pmlr-v70-valera17a,\n title = \t {Automatic Discovery of the Statistical Types of Variables in a Dataset},\n author = {Isabel Valera and Zoubin Ghahramani},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3521--3529},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/valera17a/valera17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/valera17a.html},\n abstract = \t {A common practice in statistics and machine learning is to assume that the statistical data types (e.g., ordinal, categorical or real-valued) of variables, and usually also the likelihood model, is known. However, as the availability of real-world data increases, this assumption becomes too restrictive. Data are often heterogeneous, complex, and improperly or incompletely documented. Surprisingly, despite their practical importance, there is still a lack of tools to automatically discover the statistical types of, as well as appropriate likelihood (noise) models for, the variables in a dataset. In this paper, we fill this gap by proposing a Bayesian method, which accurately discovers the statistical data types in both synthetic and real data.}\n}", "pdf": "http://proceedings.mlr.press/v70/valera17a/valera17a.pdf", "supp": "", "pdf_size": 939414, "gs_citation": 46, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17863158905501512098&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "University of Cambridge; University of Cambridge + Uber AI Labs", "aff_domain": "cam.ac.uk; ", "email": "cam.ac.uk; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/valera17a.html", "aff_unique_index": "0;0+1", "aff_unique_norm": "University of Cambridge;Uber", "aff_unique_dep": ";Uber AI Labs", "aff_unique_url": "https://www.cam.ac.uk;https://www.uber.com", "aff_unique_abbr": "Cambridge;Uber AI Labs", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cambridge;", "aff_country_unique_index": "0;0+1", "aff_country_unique": "United Kingdom;United States" }, { "title": "Averaged-DQN: Variance Reduction and Stabilization for Deep Reinforcement Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/566", "id": "566", "author_site": "Oron Anschel, Nir Baram, Nahum Shimkin", "author": "Oron Anschel; Nir Baram; Nahum Shimkin", "abstract": "Instability and variability of Deep Reinforcement Learning (DRL) algorithms tend to adversely affect their performance. Averaged-DQN is a simple extension to the DQN algorithm, based on averaging previously learned Q-values estimates, which leads to a more stable training procedure and improved performance by reducing approximation error variance in the target values. To understand the effect of the algorithm, we examine the source of value function estimation errors and provide an analytical comparison within a simplified model. 
We further present experiments on the Arcade Learning Environment benchmark that demonstrate significantly improved stability and performance due to the proposed extension.", "bibtex": "@InProceedings{pmlr-v70-anschel17a,\n title = \t {Averaged-{DQN}: Variance Reduction and Stabilization for Deep Reinforcement Learning},\n author = {Oron Anschel and Nir Baram and Nahum Shimkin},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {176--185},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/anschel17a/anschel17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/anschel17a.html},\n abstract = \t {Instability and variability of Deep Reinforcement Learning (DRL) algorithms tend to adversely affect their performance. Averaged-DQN is a simple extension to the DQN algorithm, based on averaging previously learned Q-values estimates, which leads to a more stable training procedure and improved performance by reducing approximation error variance in the target values. To understand the effect of the algorithm, we examine the source of value function estimation errors and provide an analytical comparison within a simplified model. We further present experiments on the Arcade Learning Environment benchmark that demonstrate significantly improved stability and performance due to the proposed extension.}\n}", "pdf": "http://proceedings.mlr.press/v70/anschel17a/anschel17a.pdf", "supp": "", "pdf_size": 493613, "gs_citation": 432, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2782485536994169805&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Electrical Engineering, Haifa 32000, Israel; Department of Electrical Engineering, Haifa 32000, Israel; Department of Electrical Engineering, Haifa 32000, Israel", "aff_domain": "campus.technion.ac.il;campus.technion.ac.il;ee.technion.ac.il", "email": "campus.technion.ac.il;campus.technion.ac.il;ee.technion.ac.il", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/anschel17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Technion - Israel Institute of Technology", "aff_unique_dep": "Department of Electrical Engineering", "aff_unique_url": "https://www.technion.ac.il/en/", "aff_unique_abbr": "Technion", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Haifa", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Israel" }, { "title": "Axiomatic Attribution for Deep Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/850", "id": "850", "author_site": "Mukund Sundararajan, Ankur Taly, Qiqi Yan", "author": "Mukund Sundararajan; Ankur Taly; Qiqi Yan", "abstract": "We study the problem of attributing the prediction of a deep network to its input features, a problem previously studied by several other works. We identify two fundamental axioms\u2014Sensitivity and Implementation Invariance that attribution methods ought to satisfy. We show that they are not satisfied by most known attribution methods, which we consider to be a fundamental weakness of those methods. We use the axioms to guide the design of a new attribution method called Integrated Gradients. 
Our method requires no modification to the original network and is extremely simple to implement; it just needs a few calls to the standard gradient operator. We apply this method to a couple of image models, a couple of text models and a chemistry model, demonstrating its ability to debug networks, to extract rules from a network, and to enable users to engage with models better.", "bibtex": "@InProceedings{pmlr-v70-sundararajan17a,\n title = \t {Axiomatic Attribution for Deep Networks},\n author = {Mukund Sundararajan and Ankur Taly and Qiqi Yan},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3319--3328},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/sundararajan17a/sundararajan17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/sundararajan17a.html},\n abstract = \t {We study the problem of attributing the prediction of a deep network to its input features, a problem previously studied by several other works. We identify two fundamental axioms\u2014Sensitivity and Implementation Invariance that attribution methods ought to satisfy. We show that they are not satisfied by most known attribution methods, which we consider to be a fundamental weakness of those methods. We use the axioms to guide the design of a new attribution method called Integrated Gradients. Our method requires no modification to the original network and is extremely simple to implement; it just needs a few calls to the standard gradient operator. We apply this method to a couple of image models, a couple of text models and a chemistry model, demonstrating its ability to debug networks, to extract rules from a network, and to enable users to engage with models better.}\n}", "pdf": "http://proceedings.mlr.press/v70/sundararajan17a/sundararajan17a.pdf", "supp": "", "pdf_size": 3190072, "gs_citation": 8039, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6002490314140284060&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Google Inc., Mountain View, USA; Google Inc., Mountain View, USA; Google Inc., Mountain View, USA", "aff_domain": "google.com;google.com; ", "email": "google.com;google.com; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/sundararajan17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Inc.", "aff_unique_url": "https://www.google.com", "aff_unique_abbr": "Google", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Batched High-dimensional Bayesian Optimization via Structural Kernel Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/739", "id": "739", "author_site": "Zi Wang, Chengtao Li, Stefanie Jegelka, Pushmeet Kohli", "author": "Zi Wang; Chengtao Li; Stefanie Jegelka; Pushmeet Kohli", "abstract": "Optimization of high-dimensional black-box functions is an extremely challenging problem. While Bayesian optimization has emerged as a popular approach for optimizing black-box functions, its applicability has been limited to low-dimensional problems due to its computational and statistical challenges arising from high-dimensional settings. 
In this paper, we propose to tackle these challenges by (1) assuming a latent additive structure in the function and inferring it properly for more efficient and effective BO, and (2) performing multiple evaluations in parallel to reduce the number of iterations required by the method. Our novel approach learns the latent structure with Gibbs sampling and constructs batched queries using determinantal point processes. Experimental validations on both synthetic and real-world functions demonstrate that the proposed method outperforms the existing state-of-the-art approaches.", "bibtex": "@InProceedings{pmlr-v70-wang17h,\n title = \t {Batched High-dimensional {B}ayesian Optimization via Structural Kernel Learning},\n author = {Zi Wang and Chengtao Li and Stefanie Jegelka and Pushmeet Kohli},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3656--3664},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/wang17h/wang17h.pdf},\n url = \t {https://proceedings.mlr.press/v70/wang17h.html},\n abstract = \t {Optimization of high-dimensional black-box functions is an extremely challenging problem. While Bayesian optimization has emerged as a popular approach for optimizing black-box functions, its applicability has been limited to low-dimensional problems due to its computational and statistical challenges arising from high-dimensional settings. In this paper, we propose to tackle these challenges by (1) assuming a latent additive structure in the function and inferring it properly for more efficient and effective BO, and (2) performing multiple evaluations in parallel to reduce the number of iterations required by the method. Our novel approach learns the latent structure with Gibbs sampling and constructs batched queries using determinantal point processes. 
Experimental validations on both synthetic and real-world functions demonstrate that the proposed method outperforms the existing state-of-the-art approaches.}\n}", "pdf": "http://proceedings.mlr.press/v70/wang17h/wang17h.pdf", "supp": "", "pdf_size": 710330, "gs_citation": 151, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16049968622212678048&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Computer Science and Artificial Intelligence Laboratory, Massachusetts Institute of Technology, Massachusetts, USA+DeepMind, London, UK; Computer Science and Artificial Intelligence Laboratory, Massachusetts Institute of Technology, Massachusetts, USA+DeepMind, London, UK; Computer Science and Artificial Intelligence Laboratory, Massachusetts Institute of Technology, Massachusetts, USA; DeepMind, London, UK", "aff_domain": "csail.mit.edu;mit.edu;csail.mit.edu;google.com", "email": "csail.mit.edu;mit.edu;csail.mit.edu;google.com", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/wang17h.html", "aff_unique_index": "0+1;0+1;0;1", "aff_unique_norm": "Massachusetts Institute of Technology;DeepMind", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory;", "aff_unique_url": "https://web.mit.edu;https://deepmind.com", "aff_unique_abbr": "MIT;DeepMind", "aff_campus_unique_index": "0+1;0+1;0;1", "aff_campus_unique": "Massachusetts;London", "aff_country_unique_index": "0+1;0+1;0;1", "aff_country_unique": "United States;United Kingdom" }, { "title": "Bayesian Boolean Matrix Factorisation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/716", "id": "716", "author_site": "Tammo Rukat, Christopher Holmes, Michalis Titsias, Christopher Yau", "author": "Tammo Rukat; Chris C. Holmes; Michalis K. Titsias; Christopher Yau", "abstract": "Boolean matrix factorisation aims to decompose a binary data matrix into an approximate Boolean product of two low rank, binary matrices: one containing meaningful patterns, the other quantifying how the observations can be expressed as a combination of these patterns. We introduce the OrMachine, a probabilistic generative model for Boolean matrix factorisation and derive a Metropolised Gibbs sampler that facilitates efficient parallel posterior inference. On real world and simulated data, our method outperforms all currently existing approaches for Boolean matrix factorisation and completion. This is the first method to provide full posterior inference for Boolean Matrix factorisation which is relevant in applications, e.g. for controlling false positive rates in collaborative filtering and, crucially, improves the interpretability of the inferred patterns. The proposed algorithm scales to large datasets as we demonstrate by analysing single cell gene expression data in 1.3 million mouse brain cells across 11 thousand genes on commodity hardware.", "bibtex": "@InProceedings{pmlr-v70-rukat17a,\n title = \t {{B}ayesian Boolean Matrix Factorisation},\n author = {Tammo Rukat and Chris C. Holmes and Michalis K. 
Titsias and Christopher Yau},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2969--2978},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/rukat17a/rukat17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/rukat17a.html},\n abstract = \t {Boolean matrix factorisation aims to decompose a binary data matrix into an approximate Boolean product of two low rank, binary matrices: one containing meaningful patterns, the other quantifying how the observations can be expressed as a combination of these patterns. We introduce the OrMachine, a probabilistic generative model for Boolean matrix factorisation and derive a Metropolised Gibbs sampler that facilitates efficient parallel posterior inference. On real world and simulated data, our method outperforms all currently existing approaches for Boolean matrix factorisation and completion. This is the first method to provide full posterior inference for Boolean Matrix factorisation which is relevant in applications, e.g. for controlling false positive rates in collaborative filtering and, crucially, improves the interpretability of the inferred patterns. The proposed algorithm scales to large datasets as we demonstrate by analysing single cell gene expression data in 1.3 million mouse brain cells across 11 thousand genes on commodity hardware.}\n}", "pdf": "http://proceedings.mlr.press/v70/rukat17a/rukat17a.pdf", "supp": "", "pdf_size": 965456, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11409648695635809755&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Statistics, University of Oxford, UK; Department of Statistics, University of Oxford, UK + Nuffield Department of Medicine, University of Oxford, UK; Department of Informatics, Athens University of Economics and Business, Greece; Centre for Computational Biology, Institute of Cancer and Genomic Sciences, University of Birmingham, UK", "aff_domain": "stats.ox.ac.uk; ; ; ", "email": "stats.ox.ac.uk; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/rukat17a.html", "aff_unique_index": "0;0+0;1;2", "aff_unique_norm": "University of Oxford;Athens University of Economics and Business;University of Birmingham", "aff_unique_dep": "Department of Statistics;Department of Informatics;Centre for Computational Biology, Institute of Cancer and Genomic Sciences", "aff_unique_url": "https://www.ox.ac.uk;https://www.aueb.gr;https://www.birmingham.ac.uk", "aff_unique_abbr": "Oxford;AUEB;UoB", "aff_campus_unique_index": "0;0+0;1", "aff_campus_unique": "Oxford;Athens;", "aff_country_unique_index": "0;0+0;1;0", "aff_country_unique": "United Kingdom;Greece" }, { "title": "Bayesian Models of Data Streams with Hierarchical Power Priors", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/775", "id": "775", "author_site": "Andres Masegosa, Thomas D. Nielsen, Helge Langseth, Dario Ramos-Lopez, Antonio Salmeron, Anders Madsen", "author": "Andr\u00e9s Masegosa; Thomas D. Nielsen; Helge Langseth; Dar\u0131\u0301o Ramos-L\u00f3pez; Antonio Salmer\u00f3n; Anders L. Madsen", "abstract": "Making inferences from data streams is a pervasive problem in many modern data analysis applications. 
But it requires addressing the problem of continuous model updating and adapting to changes or drifts in the underlying data generating distribution. In this paper, we approach these problems from a Bayesian perspective covering general conjugate exponential models. Our proposal makes use of non-conjugate hierarchical priors to explicitly model temporal changes of the model parameters. We also derive a novel variational inference scheme which overcomes the use of non-conjugate priors while maintaining the computational efficiency of variational methods over conjugate models. The approach is validated on three real data sets over three latent variable models.", "bibtex": "@InProceedings{pmlr-v70-masegosa17a,\n title = \t {{B}ayesian Models of Data Streams with Hierarchical Power Priors},\n author = {Andr{\\'e}s Masegosa and Thomas D. Nielsen and Helge Langseth and Dar\\'{\\i}o Ramos-L{\\'o}pez and Antonio Salmer{\\'o}n and Anders L. Madsen},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2334--2343},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/masegosa17a/masegosa17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/masegosa17a.html},\n abstract = \t {Making inferences from data streams is a pervasive problem in many modern data analysis applications. But it requires addressing the problem of continuous model updating and adapting to changes or drifts in the underlying data generating distribution. In this paper, we approach these problems from a Bayesian perspective covering general conjugate exponential models. Our proposal makes use of non-conjugate hierarchical priors to explicitly model temporal changes of the model parameters. We also derive a novel variational inference scheme which overcomes the use of non-conjugate priors while maintaining the computational efficiency of variational methods over conjugate models. The approach is validated on three real data sets over three latent variable models.}\n}", "pdf": "http://proceedings.mlr.press/v70/masegosa17a/masegosa17a.pdf", "supp": "", "pdf_size": 586076, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17862985897941210297&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": ";;;;;", "aff_domain": ";;;;;", "email": ";;;;;", "github": "", "project": "http://www.amidsttoolbox.com", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/masegosa17a.html" }, { "title": "Bayesian Optimization with Tree-structured Dependencies", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/666", "id": "666", "author_site": "Rodolphe Jenatton, Cedric Archambeau, Javier Gonz\u00e1lez, Matthias Seeger", "author": "Rodolphe Jenatton; Cedric Archambeau; Javier Gonz\u00e1lez; Matthias Seeger", "abstract": "Bayesian optimization has been successfully used to optimize complex black-box functions whose evaluations are expensive. In many applications, like in deep learning and predictive analytics, the optimization domain is itself complex and structured. In this work, we focus on use cases where this domain exhibits a known dependency structure.
The benefit of leveraging this structure is twofold: we explore the search space more efficiently and posterior inference scales more favorably with the number of observations than Gaussian Process-based approaches published in the literature. We introduce a novel surrogate model for Bayesian optimization which combines independent Gaussian Processes with a linear model that encodes a tree-based dependency structure and can transfer information between overlapping decision sequences. We also design a specialized two-step acquisition function that explores the search space more effectively. Our experiments on synthetic tree-structured functions and the tuning of feedforward neural networks trained on a range of binary classification datasets show that our method compares favorably with competing approaches.", "bibtex": "@InProceedings{pmlr-v70-jenatton17a,\n title = \t {{B}ayesian Optimization with Tree-structured Dependencies},\n author = {Rodolphe Jenatton and Cedric Archambeau and Javier Gonz{\\'a}lez and Matthias Seeger},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1655--1664},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/jenatton17a/jenatton17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/jenatton17a.html},\n abstract = \t {Bayesian optimization has been successfully used to optimize complex black-box functions whose evaluations are expensive. In many applications, like in deep learning and predictive analytics, the optimization domain is itself complex and structured. In this work, we focus on use cases where this domain exhibits a known dependency structure. The benefit of leveraging this structure is twofold: we explore the search space more efficiently and posterior inference scales more favorably with the number of observations than Gaussian Process-based approaches published in the literature. We introduce a novel surrogate model for Bayesian optimization which combines independent Gaussian Processes with a linear model that encodes a tree-based dependency structure and can transfer information between overlapping decision sequences. We also design a specialized two-step acquisition function that explores the search space more effectively. 
Our experiments on synthetic tree-structured functions and the tuning of feedforward neural networks trained on a range of binary classification datasets show that our method compares favorably with competing approaches.}\n}", "pdf": "http://proceedings.mlr.press/v70/jenatton17a/jenatton17a.pdf", "supp": "", "pdf_size": 811694, "gs_citation": 81, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5628564565977708084&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Amazon, Berlin, Germany; Amazon, Berlin, Germany; Amazon, Cambridge, United Kingdom; Amazon, Berlin, Germany", "aff_domain": "amazon.de;amazon.de;amazon.co.uk;amazon.de", "email": "amazon.de;amazon.de;amazon.co.uk;amazon.de", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/jenatton17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Amazon", "aff_unique_dep": "Amazon", "aff_unique_url": "https://www.amazon.de", "aff_unique_abbr": "Amazon", "aff_campus_unique_index": "0;0;1;0", "aff_campus_unique": "Berlin;Cambridge", "aff_country_unique_index": "0;0;1;0", "aff_country_unique": "Germany;United Kingdom" }, { "title": "Bayesian inference on random simple graphs with power law degree distributions", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/570", "id": "570", "author_site": "Juho Lee, Creighton Heaukulani, Zoubin Ghahramani, Lancelot F. James, Seungjin Choi", "author": "Juho Lee; Creighton Heaukulani; Zoubin Ghahramani; Lancelot F. James; Seungjin Choi", "abstract": "We present a model for random simple graphs with power law (i.e., heavy-tailed) degree distributions. To attain this behavior, the edge probabilities in the graph are constructed from Bertoin\u2013Fujita\u2013Roynette\u2013Yor (BFRY) random variables, which have been recently utilized in Bayesian statistics for the construction of power law models in several applications. Our construction readily extends to capture the structure of latent factors, similarly to stochastic block-models, while maintaining its power law degree distribution. The BFRY random variables are well approximated by gamma random variables in a variational Bayesian inference routine, which we apply to several network datasets for which power law degree distributions are a natural assumption. By learning the parameters of the BFRY distribution via probabilistic inference, we are able to automatically select the appropriate power law behavior from the data. In order to further scale our inference procedure, we adopt stochastic gradient ascent routines where the gradients are computed on minibatches (i.e., subsets) of the edges in the graph.", "bibtex": "@InProceedings{pmlr-v70-lee17a,\n title = \t {{B}ayesian inference on random simple graphs with power law degree distributions},\n author = {Juho Lee and Creighton Heaukulani and Zoubin Ghahramani and Lancelot F. James and Seungjin Choi},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2004--2013},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/lee17a/lee17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/lee17a.html},\n abstract = \t {We present a model for random simple graphs with power law (i.e., heavy-tailed) degree distributions. 
To attain this behavior, the edge probabilities in the graph are constructed from Bertoin\u2013Fujita\u2013Roynette\u2013Yor (BFRY) random variables, which have been recently utilized in Bayesian statistics for the construction of power law models in several applications. Our construction readily extends to capture the structure of latent factors, similarly to stochastic block-models, while maintaining its power law degree distribution. The BFRY random variables are well approximated by gamma random variables in a variational Bayesian inference routine, which we apply to several network datasets for which power law degree distributions are a natural assumption. By learning the parameters of the BFRY distribution via probabilistic inference, we are able to automatically select the appropriate power law behavior from the data. In order to further scale our inference procedure, we adopt stochastic gradient ascent routines where the gradients are computed on minibatches (i.e., subsets) of the edges in the graph.}\n}", "pdf": "http://proceedings.mlr.press/v70/lee17a/lee17a.pdf", "supp": "", "pdf_size": 748789, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6840079924762663371&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Pohang University of Science and Technology, Pohang, South Korea; University of Cambridge, Cambridge, UK; Uber AI Labs, San Francisco, CA, USA + University of Cambridge, Cambridge, UK; Hong Kong University of Science and Technology, Hong Kong; Pohang University of Science and Technology, Pohang, South Korea", "aff_domain": "postech.ac.kr; ;eng.cam.ac.uk; ;postech.ac.kr", "email": "postech.ac.kr; ;eng.cam.ac.uk; ;postech.ac.kr", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/lee17a.html", "aff_unique_index": "0;1;2+1;3;0", "aff_unique_norm": "Pohang University of Science and Technology;University of Cambridge;Uber AI Labs;Hong Kong University of Science and Technology", "aff_unique_dep": ";;AI Labs;", "aff_unique_url": "https://www.postech.ac.kr;https://www.cam.ac.uk;https://www.uber.com;https://www.ust.hk", "aff_unique_abbr": "POSTECH;Cambridge;Uber AI Labs;HKUST", "aff_campus_unique_index": "0;1;2+1;3;0", "aff_campus_unique": "Pohang;Cambridge;San Francisco;Hong Kong SAR", "aff_country_unique_index": "0;1;2+1;3;0", "aff_country_unique": "South Korea;United Kingdom;United States;China" }, { "title": "Being Robust (in High Dimensions) Can Be Practical", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/460", "id": "460", "author_site": "Ilias Diakonikolas, Gautam Kamath, Daniel Kane, Jerry Li, Ankur Moitra, Alistair Stewart", "author": "Ilias Diakonikolas; Gautam Kamath; Daniel M. Kane; Jerry Li; Ankur Moitra; Alistair Stewart", "abstract": "Robust estimation is much more challenging in high-dimensions than it is in one-dimension: Most techniques either lead to intractable optimization problems or estimators that can tolerate only a tiny fraction of errors. Recent work in theoretical computer science has shown that, in appropriate distributional models, it is possible to robustly estimate the mean and covariance with polynomial time algorithms that can tolerate a constant fraction of corruptions, independent of the dimension. However, the sample and time complexity of these algorithms is prohibitively large for high-dimensional applications. 
In this work, we address both of these issues by establishing sample complexity bounds that are optimal, up to logarithmic factors, as well as giving various refinements that allow the algorithms to tolerate a much larger fraction of corruptions. Finally, we show on both synthetic and real data that our algorithms have state-of-the-art performance and suddenly make high-dimensional robust estimation a realistic possibility.", "bibtex": "@InProceedings{pmlr-v70-diakonikolas17a,\n title = \t {Being Robust (in High Dimensions) Can Be Practical},\n author = {Ilias Diakonikolas and Gautam Kamath and Daniel M. Kane and Jerry Li and Ankur Moitra and Alistair Stewart},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {999--1008},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/diakonikolas17a/diakonikolas17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/diakonikolas17a.html},\n abstract = \t {Robust estimation is much more challenging in high-dimensions than it is in one-dimension: Most techniques either lead to intractable optimization problems or estimators that can tolerate only a tiny fraction of errors. Recent work in theoretical computer science has shown that, in appropriate distributional models, it is possible to robustly estimate the mean and covariance with polynomial time algorithms that can tolerate a constant fraction of corruptions, independent of the dimension. However, the sample and time complexity of these algorithms is prohibitively large for high-dimensional applications. In this work, we address both of these issues by establishing sample complexity bounds that are optimal, up to logarithmic factors, as well as giving various refinements that allow the algorithms to tolerate a much larger fraction of corruptions. 
Finally, we show on both synthetic and real data that our algorithms have state-of-the-art performance and suddenly make high-dimensional robust estimation a realistic possibility.}\n}", "pdf": "http://proceedings.mlr.press/v70/diakonikolas17a/diakonikolas17a.pdf", "supp": "", "pdf_size": 720742, "gs_citation": 293, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=203343319007893569&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "University of Southern California; Massachusetts Institute of Technology; University of California, San Diego; Massachusetts Institute of Technology; Massachusetts Institute of Technology; University of Southern California", "aff_domain": "usc.edu;csail.mit.edu;cs.ucsd.edu;mit.edu;mit.edu;usc.edu", "email": "usc.edu;csail.mit.edu;cs.ucsd.edu;mit.edu;mit.edu;usc.edu", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/diakonikolas17a.html", "aff_unique_index": "0;1;2;1;1;0", "aff_unique_norm": "University of Southern California;Massachusetts Institute of Technology;University of California, San Diego", "aff_unique_dep": ";;", "aff_unique_url": "https://www.usc.edu;https://web.mit.edu;https://www.ucsd.edu", "aff_unique_abbr": "USC;MIT;UCSD", "aff_campus_unique_index": "0;2;0", "aff_campus_unique": "Los Angeles;;San Diego", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Beyond Filters: Compact Feature Map for Portable Deep Model", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/466", "id": "466", "author_site": "Yunhe Wang, Chang Xu, Chao Xu, Dacheng Tao", "author": "Yunhe Wang; Chang Xu; Chao Xu; Dacheng Tao", "abstract": "Convolutional neural networks (CNNs) have shown extraordinary performance in a number of applications, but they are usually of heavy design for the accuracy reason. Beyond compressing the filters in CNNs, this paper focuses on the redundancy in the feature maps derived from the large number of filters in a layer. We propose to extract intrinsic representation of the feature maps and preserve the discriminability of the features. Circulant matrix is employed to formulate the feature map transformation, which only requires O(dlog d) computation complexity to embed a d-dimensional feature map. The filter is then re-configured to establish the mapping from original input to the new compact feature map, and the resulting network can preserve intrinsic information of the original network with significantly fewer parameters, which not only decreases the online memory for launching CNN but also accelerates the computation speed. 
Experiments on benchmark image datasets demonstrate the superiority of the proposed algorithm over state-of-the-art methods.", "bibtex": "@InProceedings{pmlr-v70-wang17m,\n title = \t {Beyond Filters: Compact Feature Map for Portable Deep Model},\n author = {Yunhe Wang and Chang Xu and Chao Xu and Dacheng Tao},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3703--3711},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/wang17m/wang17m.pdf},\n url = \t {https://proceedings.mlr.press/v70/wang17m.html},\n abstract = \t {Convolutional neural networks (CNNs) have shown extraordinary performance in a number of applications, but they are usually of heavy design for the accuracy reason. Beyond compressing the filters in CNNs, this paper focuses on the redundancy in the feature maps derived from the large number of filters in a layer. We propose to extract intrinsic representation of the feature maps and preserve the discriminability of the features. Circulant matrix is employed to formulate the feature map transformation, which only requires O(dlog d) computation complexity to embed a d-dimensional feature map. The filter is then re-configured to establish the mapping from original input to the new compact feature map, and the resulting network can preserve intrinsic information of the original network with significantly fewer parameters, which not only decreases the online memory for launching CNN but also accelerates the computation speed. Experiments on benchmark image datasets demonstrate the superiority of the proposed algorithm over state-of-the-art methods.}\n}", "pdf": "http://proceedings.mlr.press/v70/wang17m/wang17m.pdf", "supp": "", "pdf_size": 315209, "gs_citation": 67, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11652971689623959237&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Key Laboratory of Machine Perception (MOE) and Cooperative Medianet Innovation Center, School of EECS, Peking University, Beijing 100871, P.R. China; UBTech Sydney AI Institute, School of IT, FEIT, The University of Sydney, Darlington, NSW 2008, Australia; Key Laboratory of Machine Perception (MOE) and Cooperative Medianet Innovation Center, School of EECS, Peking University, Beijing 100871, P.R. 
China; UBTech Sydney AI Institute, School of IT, FEIT, The University of Sydney, Darlington, NSW 2008, Australia", "aff_domain": "pku.edu.cn;sydney.edu.au;cis.pku.edu.cn;sydney.edu.au", "email": "pku.edu.cn;sydney.edu.au;cis.pku.edu.cn;sydney.edu.au", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/wang17m.html", "aff_unique_index": "0;1;0;1", "aff_unique_norm": "Peking University;University of Sydney", "aff_unique_dep": "School of EECS;School of IT, FEIT", "aff_unique_url": "http://www.pku.edu.cn;https://www.sydney.edu.au", "aff_unique_abbr": "Peking U;USYD", "aff_campus_unique_index": "0;1;0;1", "aff_campus_unique": "Beijing;Darlington", "aff_country_unique_index": "0;1;0;1", "aff_country_unique": "China;Australia" }, { "title": "Bidirectional Learning for Time-series Models with Hidden Units", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/528", "id": "528", "author_site": "Takayuki Osogami, Hiroshi Kajino, Taro Sekiyama", "author": "Takayuki Osogami; Hiroshi Kajino; Taro Sekiyama", "abstract": "Hidden units can play essential roles in modeling time-series having long-term dependency or non-linearity but make it difficult to learn associated parameters. Here we propose a way to learn such a time-series model by training a backward model for the time-reversed time-series, where the backward model has a common set of parameters as the original (forward) model. Our key observation is that only a subset of the parameters is hard to learn, and that subset is complementary between the forward model and the backward model. By training both of the two models, we can effectively learn the values of the parameters that are hard to learn if only either of the two models is trained. We apply bidirectional learning to a dynamic Boltzmann machine extended with hidden units. Numerical experiments with synthetic and real datasets clearly demonstrate advantages of bidirectional learning.", "bibtex": "@InProceedings{pmlr-v70-osogami17a,\n title = \t {Bidirectional Learning for Time-series Models with Hidden Units},\n author = {Takayuki Osogami and Hiroshi Kajino and Taro Sekiyama},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2711--2720},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/osogami17a/osogami17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/osogami17a.html},\n abstract = \t {Hidden units can play essential roles in modeling time-series having long-term dependency or non-linearity but make it difficult to learn associated parameters. Here we propose a way to learn such a time-series model by training a backward model for the time-reversed time-series, where the backward model has a common set of parameters as the original (forward) model. Our key observation is that only a subset of the parameters is hard to learn, and that subset is complementary between the forward model and the backward model. By training both of the two models, we can effectively learn the values of the parameters that are hard to learn if only either of the two models is trained. We apply bidirectional learning to a dynamic Boltzmann machine extended with hidden units.
Numerical experiments with synthetic and real datasets clearly demonstrate advantages of bidirectional learning.}\n}", "pdf": "http://proceedings.mlr.press/v70/osogami17a/osogami17a.pdf", "supp": "", "pdf_size": 859978, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5911585389593326337&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "IBM Research - Tokyo, Tokyo, Japan; IBM Research - Tokyo, Tokyo, Japan; IBM Research - Tokyo, Tokyo, Japan", "aff_domain": "jp.ibm.com; ; ", "email": "jp.ibm.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/osogami17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "IBM", "aff_unique_dep": "Research", "aff_unique_url": "https://www.ibm.com/research", "aff_unique_abbr": "IBM", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Tokyo", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Japan" }, { "title": "Boosted Fitted Q-Iteration", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/676", "id": "676", "author_site": "Samuele Tosatto, Matteo Pirotta, Carlo D'Eramo, Marcello Restelli", "author": "Samuele Tosatto; Matteo Pirotta; Carlo D\u2019Eramo; Marcello Restelli", "abstract": "This paper is about the study of B-FQI, an Approximated Value Iteration (AVI) algorithm that exploits a boosting procedure to estimate the action-value function in reinforcement learning problems. B-FQI is an iterative off-line algorithm that, given a dataset of transitions, builds an approximation of the optimal action-value function by summing the approximations of the Bellman residuals across all iterations. The advantage of such approach w.r.t. to other AVI methods is twofold: (1) while keeping the same function space at each iteration, B-FQI can represent more complex functions by considering an additive model; (2) since the Bellman residual decreases as the optimal value function is approached, regression problems become easier as iterations proceed. We study B-FQI both theoretically, providing also a finite-sample error upper bound for it, and empirically, by comparing its performance to the one of FQI in different domains and using different regression techniques.", "bibtex": "@InProceedings{pmlr-v70-tosatto17a,\n title = \t {Boosted Fitted Q-Iteration},\n author = {Samuele Tosatto and Matteo Pirotta and Carlo D'Eramo and Marcello Restelli},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3434--3443},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/tosatto17a/tosatto17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/tosatto17a.html},\n abstract = \t {This paper is about the study of B-FQI, an Approximated Value Iteration (AVI) algorithm that exploits a boosting procedure to estimate the action-value function in reinforcement learning problems. B-FQI is an iterative off-line algorithm that, given a dataset of transitions, builds an approximation of the optimal action-value function by summing the approximations of the Bellman residuals across all iterations. The advantage of such approach w.r.t. 
to other AVI methods is twofold: (1) while keeping the same function space at each iteration, B-FQI can represent more complex functions by considering an additive model; (2) since the Bellman residual decreases as the optimal value function is approached, regression problems become easier as iterations proceed. We study B-FQI both theoretically, providing also a finite-sample error upper bound for it, and empirically, by comparing its performance to the one of FQI in different domains and using different regression techniques.}\n}", "pdf": "http://proceedings.mlr.press/v70/tosatto17a/tosatto17a.pdf", "supp": "", "pdf_size": 380559, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10311726214533306388&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": "Politecnico di Milano; SequeL Team, INRIA Lille - Nord Europe; Politecnico di Milano; Politecnico di Milano", "aff_domain": "polimi.it;inria.fr;polimi.it;polimi.it", "email": "polimi.it;inria.fr;polimi.it;polimi.it", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/tosatto17a.html", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "Politecnico di Milano;INRIA Lille - Nord Europe", "aff_unique_dep": ";SequeL Team", "aff_unique_url": "https://www.polimi.it;https://www.inria.fr/en/centre/lille-nord-europe", "aff_unique_abbr": "Polimi;INRIA", "aff_campus_unique_index": "1", "aff_campus_unique": ";Lille", "aff_country_unique_index": "0;1;0;0", "aff_country_unique": "Italy;France" }, { "title": "Bottleneck Conditional Density Estimation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/824", "id": "824", "author_site": "Rui Shu, Hung Bui, Mohammad Ghavamzadeh", "author": "Rui Shu; Hung H. Bui; Mohammad Ghavamzadeh", "abstract": "We introduce a new framework for training deep generative models for high-dimensional conditional density estimation. The Bottleneck Conditional Density Estimator (BCDE) is a variant of the conditional variational autoencoder (CVAE) that employs layer(s) of stochastic variables as the bottleneck between the input x and target y, where both are high-dimensional. Crucially, we propose a new hybrid training method that blends the conditional generative model with a joint generative model. Hybrid blending is the key to effective training of the BCDE, which avoids overfitting and provides a novel mechanism for leveraging unlabeled data. We show that our hybrid training procedure enables models to achieve competitive results in the MNIST quadrant prediction task in the fully-supervised setting, and sets new benchmarks in the semi-supervised regime for MNIST, SVHN, and CelebA.", "bibtex": "@InProceedings{pmlr-v70-shu17a,\n title = \t {Bottleneck Conditional Density Estimation},\n author = {Rui Shu and Hung H. Bui and Mohammad Ghavamzadeh},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3164--3172},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/shu17a/shu17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/shu17a.html},\n abstract = \t {We introduce a new framework for training deep generative models for high-dimensional conditional density estimation. 
The Bottleneck Conditional Density Estimator (BCDE) is a variant of the conditional variational autoencoder (CVAE) that employs layer(s) of stochastic variables as the bottleneck between the input x and target y, where both are high-dimensional. Crucially, we propose a new hybrid training method that blends the conditional generative model with a joint generative model. Hybrid blending is the key to effective training of the BCDE, which avoids overfitting and provides a novel mechanism for leveraging unlabeled data. We show that our hybrid training procedure enables models to achieve competitive results in the MNIST quadrant prediction task in the fully-supervised setting, and sets new benchmarks in the semi-supervised regime for MNIST, SVHN, and CelebA.}\n}", "pdf": "http://proceedings.mlr.press/v70/shu17a/shu17a.pdf", "supp": "", "pdf_size": 2512071, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18017948320531135706&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Stanford University; Adobe Research; DeepMind", "aff_domain": "stanford.edu; ; ", "email": "stanford.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/shu17a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Stanford University;Adobe;DeepMind", "aff_unique_dep": ";Adobe Research;", "aff_unique_url": "https://www.stanford.edu;https://research.adobe.com;https://deepmind.com", "aff_unique_abbr": "Stanford;Adobe;DeepMind", "aff_campus_unique_index": "0", "aff_campus_unique": "Stanford;", "aff_country_unique_index": "0;0;1", "aff_country_unique": "United States;United Kingdom" }, { "title": "Breaking Locality Accelerates Block Gauss-Seidel", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/502", "id": "502", "author_site": "Stephen Tu, Shivaram Venkataraman, Ashia Wilson, Alex Gittens, Michael Jordan, Benjamin Recht", "author": "Stephen Tu; Shivaram Venkataraman; Ashia C. Wilson; Alex Gittens; Michael I. Jordan; Benjamin Recht", "abstract": "Recent work by Nesterov and Stich (2016) showed that momentum can be used to accelerate the rate of convergence for block Gauss-Seidel in the setting where a fixed partitioning of the coordinates is chosen ahead of time. We show that this setting is too restrictive, constructing instances where breaking locality by running non-accelerated Gauss-Seidel with randomly sampled coordinates substantially outperforms accelerated Gauss-Seidel with any fixed partitioning. Motivated by this finding, we analyze the accelerated block Gauss-Seidel algorithm in the random coordinate sampling setting. Our analysis captures the benefit of acceleration with a new data-dependent parameter which is well behaved when the matrix sub-blocks are well-conditioned. Empirically, we show that accelerated Gauss-Seidel with random coordinate sampling provides speedups for large scale machine learning tasks when compared to non-accelerated Gauss-Seidel and the classical conjugate-gradient algorithm.", "bibtex": "@InProceedings{pmlr-v70-tu17a,\n title = \t {Breaking Locality Accelerates Block {G}auss-{S}eidel},\n author = {Stephen Tu and Shivaram Venkataraman and Ashia C. Wilson and Alex Gittens and Michael I. 
Jordan and Benjamin Recht},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3482--3491},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/tu17a/tu17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/tu17a.html},\n abstract = \t {Recent work by Nesterov and Stich (2016) showed that momentum can be used to accelerate the rate of convergence for block Gauss-Seidel in the setting where a fixed partitioning of the coordinates is chosen ahead of time. We show that this setting is too restrictive, constructing instances where breaking locality by running non-accelerated Gauss-Seidel with randomly sampled coordinates substantially outperforms accelerated Gauss-Seidel with any fixed partitioning. Motivated by this finding, we analyze the accelerated block Gauss-Seidel algorithm in the random coordinate sampling setting. Our analysis captures the benefit of acceleration with a new data-dependent parameter which is well behaved when the matrix sub-blocks are well-conditioned. Empirically, we show that accelerated Gauss-Seidel with random coordinate sampling provides speedups for large scale machine learning tasks when compared to non-accelerated Gauss-Seidel and the classical conjugate-gradient algorithm.}\n}", "pdf": "http://proceedings.mlr.press/v70/tu17a/tu17a.pdf", "supp": "", "pdf_size": 386516, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15513301595530028468&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "UC Berkeley, Berkeley, California, USA; UC Berkeley, Berkeley, California, USA; UC Berkeley, Berkeley, California, USA; Rensselaer Polytechnic Institute, Troy, New York, USA; UC Berkeley, Berkeley, California, USA; UC Berkeley, Berkeley, California, USA", "aff_domain": "berkeley.edu; ; ; ; ; ", "email": "berkeley.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/tu17a.html", "aff_unique_index": "0;0;0;1;0;0", "aff_unique_norm": "University of California, Berkeley;Rensselaer Polytechnic Institute", "aff_unique_dep": ";", "aff_unique_url": "https://www.berkeley.edu;https://www.rpi.edu", "aff_unique_abbr": "UC Berkeley;RPI", "aff_campus_unique_index": "0;0;0;1;0;0", "aff_campus_unique": "Berkeley;Troy", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Canopy Fast Sampling with Cover Trees", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/507", "id": "507", "author_site": "Manzil Zaheer, Satwik Kottur, Amr Ahmed, Jose Moura, Alex Smola", "author": "Manzil Zaheer; Satwik Kottur; Amr Ahmed; Jos\u00e9 Moura; Alex Smola", "abstract": "Hierarchical Bayesian models often capture distributions over a very large number of distinct atoms. The need for these models arises when organizing huge amount of unsupervised data, for instance, features extracted using deep convnets that can be exploited to organize abundant unlabeled images. Inference for hierarchical Bayesian models in such cases can be rather nontrivial, leading to approximate approaches. 
In this work, we propose", "bibtex": "@InProceedings{pmlr-v70-zaheer17b,\n title = \t {Canopy Fast Sampling with Cover Trees},\n author = {Manzil Zaheer and Satwik Kottur and Amr Ahmed and Jos{\\'e} Moura and Alex Smola},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3977--3986},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zaheer17b/zaheer17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/zaheer17b.html},\n abstract = \t {Hierarchical Bayesian models often capture distributions over a very large number of distinct atoms. The need for these models arises when organizing huge amount of unsupervised data, for instance, features extracted using deep convnets that can be exploited to organize abundant unlabeled images. Inference for hierarchical Bayesian models in such cases can be rather nontrivial, leading to approximate approaches. In this work, we propose", "pdf": "http://proceedings.mlr.press/v70/zaheer17b/zaheer17b.pdf", "supp": "", "pdf_size": 2162766, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14687408525173285768&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Carnegie Mellon University + Amazon Web Services; Carnegie Mellon University + Amazon Web Services; Google Inc; Carnegie Mellon University; Carnegie Mellon University + Amazon Web Services", "aff_domain": "cmu.edu; ; ; ; ", "email": "cmu.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/zaheer17b.html", "aff_unique_index": "0+1;0+1;2;0;0+1", "aff_unique_norm": "Carnegie Mellon University;Amazon;Google", "aff_unique_dep": ";Amazon Web Services;Google", "aff_unique_url": "https://www.cmu.edu;https://aws.amazon.com;https://www.google.com", "aff_unique_abbr": "CMU;AWS;Google", "aff_campus_unique_index": ";;1;", "aff_campus_unique": ";Mountain View", "aff_country_unique_index": "0+0;0+0;0;0;0+0", "aff_country_unique": "United States" }, { "title": "Capacity Releasing Diffusion for Speed and Locality", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/827", "id": "827", "author_site": "Di Wang, Kimon Fountoulakis, Monika Henzinger, Michael Mahoney, Satish Rao", "author": "Di Wang; Kimon Fountoulakis; Monika Henzinger; Michael W. Mahoney; Satish Rao", "abstract": "Diffusions and related random walk procedures are of central importance in many areas of machine learning, data analysis, and applied mathematics. Because they spread mass agnostically at each step in an iterative manner, they can sometimes spread mass \u201ctoo aggressively,\u201d thereby failing to find the \u201cright\u201d clusters. We introduce a novel Capacity Releasing Diffusion (CRD) Process, which is both faster and stays more local than the classical spectral diffusion process. As an application, we use our CRD Process to develop an improved local algorithm for graph clustering. Our local graph clustering method can find local clusters in a model of clustering where one begins the CRD Process in a cluster whose vertices are connected better internally than externally by an $O(\\log^2 n)$ factor, where $n$ is the number of nodes in the cluster. Thus, our CRD Process is the first local graph clustering algorithm that is not subject to the well-known quadratic Cheeger barrier. 
Our result requires a certain smoothness condition, which we expect to be an artifact of our analysis. Our empirical evaluation demonstrates improved results, in particular for realistic social graphs where there are moderately good\u2014but not very good\u2014clusters.", "bibtex": "@InProceedings{pmlr-v70-wang17b,\n title = \t {Capacity Releasing Diffusion for Speed and Locality},\n author = {Di Wang and Kimon Fountoulakis and Monika Henzinger and Michael W. Mahoney and Satish Rao},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3598--3607},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/wang17b/wang17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/wang17b.html},\n abstract = \t {Diffusions and related random walk procedures are of central importance in many areas of machine learning, data analysis, and applied mathematics. Because they spread mass agnostically at each step in an iterative manner, they can sometimes spread mass \u201ctoo aggressively,\u201d thereby failing to find the \u201cright\u201d clusters. We introduce a novel Capacity Releasing Diffusion (CRD) Process, which is both faster and stays more local than the classical spectral diffusion process. As an application, we use our CRD Process to develop an improved local algorithm for graph clustering. Our local graph clustering method can find local clusters in a model of clustering where one begins the CRD Process in a cluster whose vertices are connected better internally than externally by an $O(\\log^2 n)$ factor, where $n$ is the number of nodes in the cluster. Thus, our CRD Process is the first local graph clustering algorithm that is not subject to the well-known quadratic Cheeger barrier. Our result requires a certain smoothness condition, which we expect to be an artifact of our analysis. 
Our empirical evaluation demonstrates improved results, in particular for realistic social graphs where there are moderately good\u2014but not very good\u2014clusters.}\n}", "pdf": "http://proceedings.mlr.press/v70/wang17b/wang17b.pdf", "supp": "", "pdf_size": 356942, "gs_citation": 49, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3231580898407402284&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "EECS, UC Berkeley, Berkeley, CA, USA; ICSI and Statistics, UC Berkeley, Berkeley, CA, USA; Computer Science, University of Vienna, Vienna, Austria; ICSI and Statistics, UC Berkeley, Berkeley, CA, USA; EECS, UC Berkeley, Berkeley, CA, USA", "aff_domain": "eecs.berkeley.edu; ; ; ; ", "email": "eecs.berkeley.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/wang17b.html", "aff_unique_index": "0;0;1;0;0", "aff_unique_norm": "University of California, Berkeley;University of Vienna", "aff_unique_dep": "Electrical Engineering and Computer Sciences;Computer Science", "aff_unique_url": "https://www.berkeley.edu;https://univie.ac.at", "aff_unique_abbr": "UC Berkeley;Uni Vienna", "aff_campus_unique_index": "0;0;1;0;0", "aff_campus_unique": "Berkeley;Vienna", "aff_country_unique_index": "0;0;1;0;0", "aff_country_unique": "United States;Austria" }, { "title": "ChoiceRank: Identifying Preferences from Node Traffic in Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/519", "id": "519", "author_site": "Lucas Maystre, Matthias Grossglauser", "author": "Lucas Maystre; Matthias Grossglauser", "abstract": "Understanding how users navigate in a network is of high interest in many applications. We consider a setting where only aggregate node-level traffic is observed and tackle the task of learning edge transition probabilities. We cast it as a preference learning problem, and we study a model where choices follow Luce\u2019s axiom. In this case, the $O(n)$ marginal counts of node visits are a sufficient statistic for the $O(n^2)$ transition probabilities. We show how to make the inference problem well-posed regardless of the network\u2019s structure, and we present ChoiceRank, an iterative algorithm that scales to networks that contains billions of nodes and edges. We apply the model to two clickstream datasets and show that it successfully recovers the transition probabilities using only the network structure and marginal (node-level) traffic data. Finally, we also consider an application to mobility networks and apply the model to one year of rides on New York City\u2019s bicycle-sharing system.", "bibtex": "@InProceedings{pmlr-v70-maystre17b,\n title = \t {{C}hoice{R}ank: Identifying Preferences from Node Traffic in Networks},\n author = {Lucas Maystre and Matthias Grossglauser},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2354--2362},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/maystre17b/maystre17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/maystre17b.html},\n abstract = \t {Understanding how users navigate in a network is of high interest in many applications. We consider a setting where only aggregate node-level traffic is observed and tackle the task of learning edge transition probabilities. 
We cast it as a preference learning problem, and we study a model where choices follow Luce\u2019s axiom. In this case, the $O(n)$ marginal counts of node visits are a sufficient statistic for the $O(n^2)$ transition probabilities. We show how to make the inference problem well-posed regardless of the network\u2019s structure, and we present ChoiceRank, an iterative algorithm that scales to networks that contains billions of nodes and edges. We apply the model to two clickstream datasets and show that it successfully recovers the transition probabilities using only the network structure and marginal (node-level) traffic data. Finally, we also consider an application to mobility networks and apply the model to one year of rides on New York City\u2019s bicycle-sharing system.}\n}", "pdf": "http://proceedings.mlr.press/v70/maystre17b/maystre17b.pdf", "supp": "", "pdf_size": 250087, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=35794481122205661&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "School of Computer and Communication Sciences, EPFL, Lausanne, Switzerland; School of Computer and Communication Sciences, EPFL, Lausanne, Switzerland", "aff_domain": "epfl.ch; ", "email": "epfl.ch; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/maystre17b.html", "aff_unique_index": "0;0", "aff_unique_norm": "EPFL", "aff_unique_dep": "School of Computer and Communication Sciences", "aff_unique_url": "https://www.epfl.ch", "aff_unique_abbr": "EPFL", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Lausanne", "aff_country_unique_index": "0;0", "aff_country_unique": "Switzerland" }, { "title": "Clustering High Dimensional Dynamic Data Streams", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/626", "id": "626", "author_site": "Lin Yang, Harry Lang, Christian Sohler, Vladimir Braverman, Gereon Frahling", "author": "Vladimir Braverman; Gereon Frahling; Harry Lang; Christian Sohler; Lin F. Yang", "abstract": "We present data streaming algorithms for the $k$-median problem in high-dimensional dynamic geometric data streams, i.e. streams allowing both insertions and deletions of points from a discrete Euclidean space $\\{1, 2, \\ldots \\Delta\\}^d$. Our algorithms use $k \\epsilon^{-2} \\mathrm{poly}(d \\log \\Delta)$ space/time and maintain with high probability a small weighted set of points (a coreset) such that for every set of $k$ centers the cost of the coreset $(1+\\epsilon)$-approximates the cost of the streamed point set. We also provide algorithms that guarantee only positive weights in the coreset with additional logarithmic factors in the space and time complexities. We can use this positively-weighted coreset to compute a $(1+\\epsilon)$-approximation for the $k$-median problem by any efficient offline $k$-median algorithm. All previous algorithms for computing a $(1+\\epsilon)$-approximation for the $k$-median problem over dynamic data streams required space and time exponential in $d$. Our algorithms can be generalized to metric spaces of bounded doubling dimension.", "bibtex": "@InProceedings{pmlr-v70-braverman17a,\n title = \t {Clustering High Dimensional Dynamic Data Streams},\n author = {Vladimir Braverman and Gereon Frahling and Harry Lang and Christian Sohler and Lin F. 
Yang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {576--585},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/braverman17a/braverman17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/braverman17a.html},\n abstract = \t {We present data streaming algorithms for the $k$-median problem in high-dimensional dynamic geometric data streams, i.e. streams allowing both insertions and deletions of points from a discrete Euclidean space $\\{1, 2, \\ldots \\Delta\\}^d$. Our algorithms use $k \\epsilon^{-2} \\mathrm{poly}(d \\log \\Delta)$ space/time and maintain with high probability a small weighted set of points (a coreset) such that for every set of $k$ centers the cost of the coreset $(1+\\epsilon)$-approximates the cost of the streamed point set. We also provide algorithms that guarantee only positive weights in the coreset with additional logarithmic factors in the space and time complexities. We can use this positively-weighted coreset to compute a $(1+\\epsilon)$-approximation for the $k$-median problem by any efficient offline $k$-median algorithm. All previous algorithms for computing a $(1+\\epsilon)$-approximation for the $k$-median problem over dynamic data streams required space and time exponential in $d$. Our algorithms can be generalized to metric spaces of bounded doubling dimension.}\n}", "pdf": "http://proceedings.mlr.press/v70/braverman17a/braverman17a.pdf", "supp": "", "pdf_size": 557356, "gs_citation": 64, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7331754285495099090&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 8, "aff": "Johns Hopkins University; Linguee GmbH; Johns Hopkins University; TU Dortmund; Johns Hopkins University", "aff_domain": "jhu.edu; ; ;tu-dortmund.de;jhu.edu", "email": "jhu.edu; ; ;tu-dortmund.de;jhu.edu", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/braverman17a.html", "aff_unique_index": "0;1;0;2;0", "aff_unique_norm": "Johns Hopkins University;Linguee;Technische Universit\u00e4t Dortmund", "aff_unique_dep": ";;", "aff_unique_url": "https://www.jhu.edu;https://www.linguee.com;https://www.tu-dortmund.de", "aff_unique_abbr": "JHU;Linguee;TU Dortmund", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0;1;0", "aff_country_unique": "United States;Germany" }, { "title": "Clustering by Sum of Norms: Stochastic Incremental Algorithm, Convergence and Cluster Recovery", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/699", "id": "699", "author_site": "Ashkan Panahi, Devdatt Dubhashi, Fredrik D Johansson, Chiranjib Bhattacharya", "author": "Ashkan Panahi; Devdatt Dubhashi; Fredrik D. Johansson; Chiranjib Bhattacharyya", "abstract": "Standard clustering methods such as K-means, Gaussian mixture models, and hierarchical clustering are beset by local minima, which are sometimes drastically suboptimal. Moreover, the number of clusters K must be known in advance. The recently introduced sum-of-norms (SON) or Clusterpath convex relaxation of k-means and hierarchical clustering shrinks cluster centroids toward one another and ensures a unique global minimizer. 
We give a scalable stochastic incremental algorithm based on proximal iterations to solve the SON problem with convergence guarantees. We also show that the algorithm recovers clusters under quite general conditions which have a similar form to the unifying proximity condition introduced in the approximation algorithms community (that covers paradigm cases such as Gaussian mixtures and planted partition models). We give experimental results to confirm that our algorithm scales much better than previous methods while producing clusters of comparable quality.", "bibtex": "@InProceedings{pmlr-v70-panahi17a,\n title = \t {Clustering by Sum of Norms: Stochastic Incremental Algorithm, Convergence and Cluster Recovery},\n author = {Ashkan Panahi and Devdatt Dubhashi and Fredrik D. Johansson and Chiranjib Bhattacharyya},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2769--2777},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/panahi17a/panahi17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/panahi17a.html},\n abstract = \t {Standard clustering methods such as K-means, Gaussian mixture models, and hierarchical clustering are beset by local minima, which are sometimes drastically suboptimal. Moreover the number of clusters K must be known in advance. The recently introduced the sum-of-norms (SON) or Clusterpath convex relaxation of k-means and hierarchical clustering shrinks cluster centroids toward one another and ensure a unique global minimizer. We give a scalable stochastic incremental algorithm based on proximal iterations to solve the SON problem with convergence guarantees. We also show that the algorithm recovers clusters under quite general conditions which have a similar form to the unifying proximity condition introduced in the approximation algorithms community (that covers paradigm cases such as Gaussian mixtures and planted partition models). 
We give experimental results to confirm that our algorithm scales much better than previous methods while producing clusters of comparable quality.}\n}", "pdf": "http://proceedings.mlr.press/v70/panahi17a/panahi17a.pdf", "supp": "", "pdf_size": 370685, "gs_citation": 58, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16818671155836168916&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "ECE, North Carolina State University, Raleigh, NC; CSE, Chalmers University of Technology, G\u00f6teborg, Sweden; IMES, MIT, Cambridge, MA; CSA, IISc, Bangalore, India", "aff_domain": "gmail.com; ; ;", "email": "gmail.com; ; ;", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/panahi17a.html", "aff_unique_index": "0;1;2;3", "aff_unique_norm": "North Carolina State University;Chalmers University of Technology;Massachusetts Institute of Technology;Indian Institute of Science", "aff_unique_dep": "Department of Electrical and Computer Engineering;Department of Computer Science and Engineering (CSE);IMES;Computer Science and Automation", "aff_unique_url": "https://www.ncsu.edu;https://www.chalmers.se;https://www.mit.edu;https://www.iisc.ac.in", "aff_unique_abbr": "NCSU;Chalmers;MIT;IISc", "aff_campus_unique_index": "0;1;2;3", "aff_campus_unique": "Raleigh;G\u00f6teborg;Cambridge;Bangalore", "aff_country_unique_index": "0;1;0;2", "aff_country_unique": "United States;Sweden;India" }, { "title": "Co-clustering through Optimal Transport", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/591", "id": "591", "author_site": "Charlotte Laclau, Ievgen Redko, Basarab Matei, Youn\u00e8s Bennani, Vincent Brault", "author": "Charlotte Laclau; Ievgen Redko; Basarab Matei; Youn\u00e8s Bennani; Vincent Brault", "abstract": "In this paper, we present a novel method for co-clustering, an unsupervised learning approach that aims at discovering homogeneous groups of data instances and features by grouping them simultaneously. The proposed method uses the entropy regularized optimal transport between empirical measures defined on data instances and features in order to obtain an estimated joint probability density function represented by the optimal coupling matrix. This matrix is further factorized to obtain the induced row and column partitions using a multiscale representation approach. To justify our method theoretically, we show how the solution of the regularized optimal transport can be seen from the variational inference perspective, thus motivating its use for co-clustering. The algorithm derived for the proposed method and its kernelized version based on the notion of Gromov-Wasserstein distance are fast, accurate and can determine automatically the number of both row and column clusters. 
These features are vividly demonstrated through extensive experimental evaluations.", "bibtex": "@InProceedings{pmlr-v70-laclau17a,\n title = \t {Co-clustering through Optimal Transport},\n author = {Charlotte Laclau and Ievgen Redko and Basarab Matei and Youn{\\`e}s Bennani and Vincent Brault},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1955--1964},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/laclau17a/laclau17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/laclau17a.html},\n abstract = \t {In this paper, we present a novel method for co-clustering, an unsupervised learning approach that aims at discovering homogeneous groups of data instances and features by grouping them simultaneously. The proposed method uses the entropy regularized optimal transport between empirical measures defined on data instances and features in order to obtain an estimated joint probability density function represented by the optimal coupling matrix. This matrix is further factorized to obtain the induced row and column partitions using a multiscale representation approach. To justify our method theoretically, we show how the solution of the regularized optimal transport can be seen from the variational inference perspective, thus motivating its use for co-clustering. The algorithm derived for the proposed method and its kernelized version based on the notion of Gromov-Wasserstein distance are fast, accurate and can determine automatically the number of both row and column clusters. These features are vividly demonstrated through extensive experimental evaluations.}\n}", "pdf": "http://proceedings.mlr.press/v70/laclau17a/laclau17a.pdf", "supp": "", "pdf_size": 676753, "gs_citation": 59, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13731220190422664292&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "CNRS, LIPN, Universit\u00e9 Paris 13 - Sorbonne Paris Cit\u00e9, France+CNRS, LIG, Univ. Grenoble-Alpes, France; CNRS UMR 5220 - INSERM U1206, Univ. Lyon 1, INSA Lyon, F-69621 Villeurbanne, France; CNRS, LIPN, Universit\u00e9 Paris 13 - Sorbonne Paris Cit\u00e9, France; CNRS, LIPN, Universit\u00e9 Paris 13 - Sorbonne Paris Cit\u00e9, France; CNRS, LJK, Univ. Grenoble-Alpes, France", "aff_domain": "univ-grenoble-alpes.fr; ; ; ; ", "email": "univ-grenoble-alpes.fr; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/laclau17a.html", "aff_unique_index": "0+0;1;0;0;2", "aff_unique_norm": "CNRS;CNRS UMR 5220;Universit\u00e9 Grenoble-Alpes", "aff_unique_dep": "LIPN;INSERM U1206;Laboratoire Jean Kuntzmann (LJK)", "aff_unique_url": "https://www.cnrs.fr;;https://www.univ-grenoble-alpes.fr", "aff_unique_abbr": "CNRS;;UGA", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0;0;0", "aff_country_unique": "France" }, { "title": "Cognitive Psychology for Deep Neural Networks: A Shape Bias Case Study", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/536", "id": "536", "author_site": "Samuel Ritter, David GT Barrett, Adam Santoro, Matthew Botvinick", "author": "Samuel Ritter; David G. T. Barrett; Adam Santoro; Matt M. 
Botvinick", "abstract": "Deep neural networks (DNNs) have advanced performance on a wide range of complex tasks, rapidly outpacing our understanding of the nature of their solutions. While past work sought to advance our understanding of these models, none has made use of the rich history of problem descriptions, theories, and experimental methods developed by cognitive psychologists to study the human mind. To explore the potential value of these tools, we chose a well-established analysis from developmental psychology that explains how children learn word labels for objects, and applied that analysis to DNNs. Using datasets of stimuli inspired by the original cognitive psychology experiments, we find that state-of-the-art one shot learning models trained on ImageNet exhibit a similar bias to that observed in humans: they prefer to categorize objects according to shape rather than color. The magnitude of this shape bias varies greatly among architecturally identical, but differently seeded models, and even fluctuates within seeds throughout training, despite nearly equivalent classification performance. These results demonstrate the capability of tools from cognitive psychology for exposing hidden computational properties of DNNs, while concurrently providing us with a computational model for human word learning.", "bibtex": "@InProceedings{pmlr-v70-ritter17a,\n title = \t {Cognitive Psychology for Deep Neural Networks: A Shape Bias Case Study},\n author = {Samuel Ritter and David G. T. Barrett and Adam Santoro and Matt M. Botvinick},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2940--2949},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/ritter17a/ritter17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/ritter17a.html},\n abstract = \t {Deep neural networks (DNNs) have advanced performance on a wide range of complex tasks, rapidly outpacing our understanding of the nature of their solutions. While past work sought to advance our understanding of these models, none has made use of the rich history of problem descriptions, theories, and experimental methods developed by cognitive psychologists to study the human mind. To explore the potential value of these tools, we chose a well-established analysis from developmental psychology that explains how children learn word labels for objects, and applied that analysis to DNNs. Using datasets of stimuli inspired by the original cognitive psychology experiments, we find that state-of-the-art one shot learning models trained on ImageNet exhibit a similar bias to that observed in humans: they prefer to categorize objects according to shape rather than color. The magnitude of this shape bias varies greatly among architecturally identical, but differently seeded models, and even fluctuates within seeds throughout training, despite nearly equivalent classification performance. 
These results demonstrate the capability of tools from cognitive psychology for exposing hidden computational properties of DNNs, while concurrently providing us with a computational model for human word learning.}\n}", "pdf": "http://proceedings.mlr.press/v70/ritter17a/ritter17a.pdf", "supp": "", "pdf_size": 1829430, "gs_citation": 260, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7115471509629834986&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK", "aff_domain": "google.com;google.com; ; ", "email": "google.com;google.com; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/ritter17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Coherence Pursuit: Fast, Simple, and Robust Subspace Recovery", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/527", "id": "527", "author_site": "Mostafa Rahmani, George Atia", "author": "Mostafa Rahmani; George Atia", "abstract": "This paper presents a remarkably simple, yet powerful, algorithm for robust Principal Component Analysis (PCA). In the proposed approach, an outlier is set apart from an inlier by comparing their coherence with the rest of the data points. As inliers lie on a low dimensional subspace, they are likely to have strong mutual coherence provided there are enough inliers. By contrast, outliers do not typically admit low dimensional structures, wherefore an outlier is unlikely to bear strong resemblance with a large number of data points. The mutual coherences are computed by forming the Gram matrix of normalized data points. Subsequently, the subspace is recovered from the span of a small subset of the data points that exhibit strong coherence with the rest of the data. As coherence pursuit only involves one simple matrix multiplication, it is significantly faster than the state of-the-art robust PCA algorithms. We provide a mathematical analysis of the proposed algorithm under a random model for the distribution of the inliers and outliers. It is shown that the proposed method can recover the correct subspace even if the data is predominantly outliers. To the best of our knowledge, this is the first provable robust PCA algorithm that is simultaneously non-iterative, can tolerate a large number of outliers and is robust to linearly dependent outliers.", "bibtex": "@InProceedings{pmlr-v70-rahmani17a,\n title = \t {Coherence Pursuit: Fast, Simple, and Robust Subspace Recovery},\n author = {Mostafa Rahmani and George Atia},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2864--2873},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/rahmani17a/rahmani17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/rahmani17a.html},\n abstract = \t {This paper presents a remarkably simple, yet powerful, algorithm for robust Principal Component Analysis (PCA). 
In the proposed approach, an outlier is set apart from an inlier by comparing their coherence with the rest of the data points. As inliers lie on a low dimensional subspace, they are likely to have strong mutual coherence provided there are enough inliers. By contrast, outliers do not typically admit low dimensional structures, wherefore an outlier is unlikely to bear strong resemblance with a large number of data points. The mutual coherences are computed by forming the Gram matrix of normalized data points. Subsequently, the subspace is recovered from the span of a small subset of the data points that exhibit strong coherence with the rest of the data. As coherence pursuit only involves one simple matrix multiplication, it is significantly faster than the state of-the-art robust PCA algorithms. We provide a mathematical analysis of the proposed algorithm under a random model for the distribution of the inliers and outliers. It is shown that the proposed method can recover the correct subspace even if the data is predominantly outliers. To the best of our knowledge, this is the first provable robust PCA algorithm that is simultaneously non-iterative, can tolerate a large number of outliers and is robust to linearly dependent outliers.}\n}", "pdf": "http://proceedings.mlr.press/v70/rahmani17a/rahmani17a.pdf", "supp": "", "pdf_size": 1560363, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=674211959609239325&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "University of Central Florida, Orlando, Florida, USA; University of Central Florida, Orlando, Florida, USA", "aff_domain": "knights.ucf.edu; ", "email": "knights.ucf.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/rahmani17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Central Florida", "aff_unique_dep": "", "aff_unique_url": "https://www.ucf.edu", "aff_unique_abbr": "UCF", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Orlando", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Coherent Probabilistic Forecasts for Hierarchical Time Series", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/633", "id": "633", "author_site": "Souhaib Ben Taieb, James Taylor, Rob Hyndman", "author": "Souhaib Ben Taieb; James W. Taylor; Rob J. Hyndman", "abstract": "Many applications require forecasts for a hierarchy comprising a set of time series along with aggregates of subsets of these series. Hierarchical forecasting requires not only good prediction accuracy at each level of the hierarchy, but also the coherency between different levels \u2014 the property that forecasts add up appropriately across the hierarchy. A fundamental limitation of prior research is the focus on forecasting the mean of each time series. We consider the situation where probabilistic forecasts are needed for each series in the hierarchy, and propose an algorithm to compute predictive distributions rather than mean forecasts only. Our algorithm has the advantage of synthesizing information from different levels in the hierarchy through a sparse forecast combination and a probabilistic hierarchical aggregation. We evaluate the accuracy of our forecasting algorithm on both simulated data and large-scale electricity smart meter data. 
The results show consistent performance gains compared to state-of-the-art methods.", "bibtex": "@InProceedings{pmlr-v70-taieb17a,\n title = \t {Coherent Probabilistic Forecasts for Hierarchical Time Series},\n author = {Souhaib Ben Taieb and James W. Taylor and Rob J. Hyndman},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3348--3357},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/taieb17a/taieb17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/taieb17a.html},\n abstract = \t {Many applications require forecasts for a hierarchy comprising a set of time series along with aggregates of subsets of these series. Hierarchical forecasting requires not only good prediction accuracy at each level of the hierarchy, but also the coherency between different levels \u2014 the property that forecasts add up appropriately across the hierarchy. A fundamental limitation of prior research is the focus on forecasting the mean of each time series. We consider the situation where probabilistic forecasts are needed for each series in the hierarchy, and propose an algorithm to compute predictive distributions rather than mean forecasts only. Our algorithm has the advantage of synthesizing information from different levels in the hierarchy through a sparse forecast combination and a probabilistic hierarchical aggregation. We evaluate the accuracy of our forecasting algorithm on both simulated data and large-scale electricity smart meter data. The results show consistent performance gains compared to state-of-the-art methods.}\n}", "pdf": "http://proceedings.mlr.press/v70/taieb17a/taieb17a.pdf", "supp": "", "pdf_size": 454117, "gs_citation": 123, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8643655270257419505&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "Monash University, Melbourne, Australia; University of Oxford, Oxford, UK; Monash University, Melbourne, Australia", "aff_domain": "monash.edu; ; ", "email": "monash.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/taieb17a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Monash University;University of Oxford", "aff_unique_dep": ";", "aff_unique_url": "https://www.monash.edu;https://www.ox.ac.uk", "aff_unique_abbr": "Monash;Oxford", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Melbourne;Oxford", "aff_country_unique_index": "0;1;0", "aff_country_unique": "Australia;United Kingdom" }, { "title": "Collect at Once, Use Effectively: Making Non-interactive Locally Private Learning Possible", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/860", "id": "860", "author_site": "Kai Zheng, Wenlong Mou, Liwei Wang", "author": "Kai Zheng; Wenlong Mou; Liwei Wang", "abstract": "Non-interactive Local Differential Privacy (LDP) requires data analysts to collect data from users through a noisy channel at once. In this paper, we extend the frontiers of Non-interactive LDP learning and estimation from several aspects. 
For learning with smooth generalized linear losses, we propose an approximate stochastic gradient oracle estimated from non-interactive LDP channel using Chebyshev expansion, which is combined with inexact gradient methods to obtain an efficient algorithm with quasi-polynomial sample complexity bound. For the high-dimensional world, we discover that under $\\ell_2$-norm assumption on data points, high-dimensional sparse linear regression and mean estimation can be achieved with logarithmic dependence on dimension, using random projection and approximate recovery. We also extend our methods to Kernel Ridge Regression. Our work is the first one that makes learning and estimation possible for a broad range of learning tasks under non-interactive LDP model.", "bibtex": "@InProceedings{pmlr-v70-zheng17c,\n title = \t {Collect at Once, Use Effectively: Making Non-interactive Locally Private Learning Possible},\n author = {Kai Zheng and Wenlong Mou and Liwei Wang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {4130--4139},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zheng17c/zheng17c.pdf},\n url = \t {https://proceedings.mlr.press/v70/zheng17c.html},\n abstract = \t {Non-interactive Local Differential Privacy (LDP) requires data analysts to collect data from users through noisy channel at once. In this paper, we extend the frontiers of Non-interactive LDP learning and estimation from several aspects. For learning with smooth generalized linear losses, we propose an approximate stochastic gradient oracle estimated from non-interactive LDP channel using Chebyshev expansion, which is combined with inexact gradient methods to obtain an efficient algorithm with quasi-polynomial sample complexity bound. For the high-dimensional world, we discover that under $\\ell_2$-norm assumption on data points, high-dimensional sparse linear regression and mean estimation can be achieved with logarithmic dependence on dimension, using random projection and approximate recovery. We also extend our methods to Kernel Ridge Regression. 
Our work is the first one that makes learning and estimation possible for a broad range of learning tasks under non-interactive LDP model.}\n}", "pdf": "http://proceedings.mlr.press/v70/zheng17c/zheng17c.pdf", "supp": "", "pdf_size": 310727, "gs_citation": 56, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=796318441823170130&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Key Laboratory of Machine Perception, MOE, School of EECS, Peking University, Beijing, China; Key Laboratory of Machine Perception, MOE, School of EECS, Peking University, Beijing, China; Key Laboratory of Machine Perception, MOE, School of EECS, Peking University, Beijing, China", "aff_domain": "pku.edu.cn;pku.edu.cn;cis.pku.edu.cn", "email": "pku.edu.cn;pku.edu.cn;cis.pku.edu.cn", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/zheng17c.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Peking University", "aff_unique_dep": "School of EECS", "aff_unique_url": "http://www.pku.edu.cn", "aff_unique_abbr": "Peking U", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Beijing", "aff_country_unique_index": "0;0;0", "aff_country_unique": "China" }, { "title": "Combined Group and Exclusive Sparsity for Deep Neural Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/474", "id": "474", "author_site": "jaehong yoon, Sung Ju Hwang", "author": "Jaehong Yoon; Sung Ju Hwang", "abstract": "The number of parameters in a deep neural network is usually very large, which helps with its learning capacity but also hinders its scalability and practicality due to memory/time inefficiency and overfitting. To resolve this issue, we propose a sparsity regularization method that exploits both positive and negative correlations among the features to enforce the network to be sparse, and at the same time remove any redundancies among the features to fully utilize the capacity of the network. Specifically, we propose to use an exclusive sparsity regularization based on (1,2)-norm, which promotes competition for features between different weights, thus enforcing them to fit to disjoint sets of features. We further combine the exclusive sparsity with the group sparsity based on (2,1)-norm, to promote both sharing and competition for features in training of a deep neural network. We validate our method on multiple public datasets, and the results show that our method can obtain more compact and efficient networks while also improving the performance over the base networks with full weights, as opposed to existing sparsity regularizations that often obtain efficiency at the expense of prediction accuracy.", "bibtex": "@InProceedings{pmlr-v70-yoon17a,\n title = \t {Combined Group and Exclusive Sparsity for Deep Neural Networks},\n author = {Jaehong Yoon and Sung Ju Hwang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3958--3966},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/yoon17a/yoon17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/yoon17a.html},\n abstract = \t {The number of parameters in a deep neural network is usually very large, which helps with its learning capacity but also hinders its scalability and practicality due to memory/time inefficiency and overfitting. 
To resolve this issue, we propose a sparsity regularization method that exploits both positive and negative correlations among the features to enforce the network to be sparse, and at the same time remove any redundancies among the features to fully utilize the capacity of the network. Specifically, we propose to use an exclusive sparsity regularization based on (1,2)-norm, which promotes competition for features between different weights, thus enforcing them to fit to disjoint sets of features. We further combine the exclusive sparsity with the group sparsity based on (2,1)-norm, to promote both sharing and competition for features in training of a deep neural network. We validate our method on multiple public datasets, and the results show that our method can obtain more compact and efficient networks while also improving the performance over the base networks with full weights, as opposed to existing sparsity regularizations that often obtain efficiency at the expense of prediction accuracy.}\n}", "pdf": "http://proceedings.mlr.press/v70/yoon17a/yoon17a.pdf", "supp": "", "pdf_size": 954328, "gs_citation": 200, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4422974575789410870&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "UNIST, Ulsan, South Korea+AITrics, Seoul, South Korea; UNIST, Ulsan, South Korea+AITrics, Seoul, South Korea", "aff_domain": "unist.ac.kr;unist.ac.kr", "email": "unist.ac.kr;unist.ac.kr", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/yoon17a.html", "aff_unique_index": "0+1;0+1", "aff_unique_norm": "Ulsan National Institute of Science and Technology;AITRICS", "aff_unique_dep": ";", "aff_unique_url": "https://www.unist.ac.kr;", "aff_unique_abbr": "UNIST;", "aff_campus_unique_index": "0+1;0+1", "aff_campus_unique": "Ulsan;Seoul", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "South Korea" }, { "title": "Combining Model-Based and Model-Free Updates for Trajectory-Centric Reinforcement Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/853", "id": "853", "author_site": "Yevgen Chebotar, Karol Hausman, Marvin Zhang, Gaurav Sukhatme, Stefan Schaal, Sergey Levine", "author": "Yevgen Chebotar; Karol Hausman; Marvin Zhang; Gaurav Sukhatme; Stefan Schaal; Sergey Levine", "abstract": "Reinforcement learning algorithms for real-world robotic applications must be able to handle complex, unknown dynamical systems while maintaining data-efficient learning. These requirements are handled well by model-free and model-based RL approaches, respectively. In this work, we aim to combine the advantages of these approaches. By focusing on time-varying linear-Gaussian policies, we enable a model-based algorithm based on the linear-quadratic regulator that can be integrated into the model-free framework of path integral policy improvement. We can further combine our method with guided policy search to train arbitrary parameterized policies such as deep neural networks. 
Our simulation and real-world experiments demonstrate that this method can solve challenging manipulation tasks with comparable or better performance than model-free methods while maintaining the sample efficiency of model-based methods.", "bibtex": "@InProceedings{pmlr-v70-chebotar17a,\n title = \t {Combining Model-Based and Model-Free Updates for Trajectory-Centric Reinforcement Learning},\n author = {Yevgen Chebotar and Karol Hausman and Marvin Zhang and Gaurav Sukhatme and Stefan Schaal and Sergey Levine},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {703--711},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/chebotar17a/chebotar17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/chebotar17a.html},\n abstract = \t {Reinforcement learning algorithms for real-world robotic applications must be able to handle complex, unknown dynamical systems while maintaining data-efficient learning. These requirements are handled well by model-free and model-based RL approaches, respectively. In this work, we aim to combine the advantages of these approaches. By focusing on time-varying linear-Gaussian policies, we enable a model-based algorithm based on the linear-quadratic regulator that can be integrated into the model-free framework of path integral policy improvement. We can further combine our method with guided policy search to train arbitrary parameterized policies such as deep neural networks. Our simulation and real-world experiments demonstrate that this method can solve challenging manipulation tasks with comparable or better performance than model-free methods while maintaining the sample efficiency of model-based methods.}\n}", "pdf": "http://proceedings.mlr.press/v70/chebotar17a/chebotar17a.pdf", "supp": "", "pdf_size": 4619969, "gs_citation": 227, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3758176127297619060&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "University of Southern California, Los Angeles, CA, USA+Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany; University of Southern California, Los Angeles, CA, USA; University of California Berkeley, Berkeley, CA, USA; University of Southern California, Los Angeles, CA, USA; University of Southern California, Los Angeles, CA, USA+Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany; University of California Berkeley, Berkeley, CA, USA", "aff_domain": "usc.edu; ; ; ; ; ", "email": "usc.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/chebotar17a.html", "aff_unique_index": "0+1;0;2;0;0+1;2", "aff_unique_norm": "University of Southern California;Max Planck Institute for Intelligent Systems;University of California, Berkeley", "aff_unique_dep": ";;", "aff_unique_url": "https://www.usc.edu;https://www.mpi-is.mpg.de;https://www.berkeley.edu", "aff_unique_abbr": "USC;MPI-IS;UC Berkeley", "aff_campus_unique_index": "0+1;0;2;0;0+1;2", "aff_campus_unique": "Los Angeles;T\u00fcbingen;Berkeley", "aff_country_unique_index": "0+1;0;0;0;0+1;0", "aff_country_unique": "United States;Germany" }, { "title": "Communication-efficient Algorithms for Distributed Stochastic Principal Component Analysis", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/559", 
"id": "559", "author_site": "Dan Garber, Ohad Shamir, Nati Srebro", "author": "Dan Garber; Ohad Shamir; Nathan Srebro", "abstract": "We study the fundamental problem of Principal Component Analysis in a statistical distributed setting in which each machine out of m stores a sample of n points sampled i.i.d. from a single unknown distribution. We study algorithms for estimating the leading principal component of the population covariance matrix that are both communication-efficient and achieve estimation error of the order of the centralized ERM solution that uses all mn samples. On the negative side, we show that in contrast to results obtained for distributed estimation under convexity assumptions, for the PCA objective, simply averaging the local ERM solutions cannot guarantee error that is consistent with the centralized ERM. We show that this unfortunate phenomena can be remedied by performing a simple correction step which correlates between the individual solutions, and provides an estimator that is consistent with the centralized ERM for sufficiently-large n. We also introduce an iterative distributed algorithm that is applicable in any regime of n, which is based on distributed matrix-vector products. The algorithm gives significant acceleration in terms of communication rounds over previous distributed algorithms, in a wide regime of parameters.", "bibtex": "@InProceedings{pmlr-v70-garber17a,\n title = \t {Communication-efficient Algorithms for Distributed Stochastic Principal Component Analysis},\n author = {Dan Garber and Ohad Shamir and Nathan Srebro},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1203--1212},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/garber17a/garber17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/garber17a.html},\n abstract = \t {We study the fundamental problem of Principal Component Analysis in a statistical distributed setting in which each machine out of m stores a sample of n points sampled i.i.d. from a single unknown distribution. We study algorithms for estimating the leading principal component of the population covariance matrix that are both communication-efficient and achieve estimation error of the order of the centralized ERM solution that uses all mn samples. On the negative side, we show that in contrast to results obtained for distributed estimation under convexity assumptions, for the PCA objective, simply averaging the local ERM solutions cannot guarantee error that is consistent with the centralized ERM. We show that this unfortunate phenomena can be remedied by performing a simple correction step which correlates between the individual solutions, and provides an estimator that is consistent with the centralized ERM for sufficiently-large n. We also introduce an iterative distributed algorithm that is applicable in any regime of n, which is based on distributed matrix-vector products. 
The algorithm gives significant acceleration in terms of communication rounds over previous distributed algorithms, in a wide regime of parameters.}\n}", "pdf": "http://proceedings.mlr.press/v70/garber17a/garber17a.pdf", "supp": "", "pdf_size": 609506, "gs_citation": 63, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17564772344519017234&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Technion - Israel Institute of Technology, Haifa, Israel; Weizmann Institute of Science, Rehovot, Israel; Toyota Technological Institute, Illinois, USA", "aff_domain": "technion.ac.il;weizmann.ac.il;ttic.edu", "email": "technion.ac.il;weizmann.ac.il;ttic.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/garber17a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Technion - Israel Institute of Technology;Weizmann Institute of Science;Toyota Technological Institute at Chicago", "aff_unique_dep": ";;", "aff_unique_url": "https://www.technion.ac.il;https://www.weizmann.org.il;https://www.tti-chicago.org", "aff_unique_abbr": "Technion;Weizmann;TTI", "aff_campus_unique_index": "0;1;2", "aff_campus_unique": "Haifa;Rehovot;Chicago", "aff_country_unique_index": "0;0;1", "aff_country_unique": "Israel;United States" }, { "title": "Composing Tree Graphical Models with Persistent Homology Features for Clustering Mixed-Type Data", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/504", "id": "504", "author_site": "XIUYAN NI, Novi Quadrianto, Yusu Wang, Chao Chen", "author": "Xiuyan Ni; Novi Quadrianto; Yusu Wang; Chao Chen", "abstract": "Clustering data with both continuous and discrete attributes is a challenging task. Existing methods lack a principled probabilistic formulation. In this paper, we propose a clustering method based on a tree-structured graphical model to describe the generation process of mixed-type data. Our tree-structured model factorizes into a product of pairwise interactions, and thus localizes the interaction between feature variables of different types. To provide a robust clustering method based on the tree-model, we adopt a topographical view and compute peaks of the density function and their attractive basins for clustering. Furthermore, we leverage the theory from topological data analysis to adaptively merge trivial peaks into large ones in order to achieve meaningful clusterings. Our method outperforms state-of-the-art methods on mixed-type data.", "bibtex": "@InProceedings{pmlr-v70-ni17a,\n title = \t {Composing Tree Graphical Models with Persistent Homology Features for Clustering Mixed-Type Data},\n author = {Xiuyan Ni and Novi Quadrianto and Yusu Wang and Chao Chen},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2622--2631},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/ni17a/ni17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/ni17a.html},\n abstract = \t {Clustering data with both continuous and discrete attributes is a challenging task. Existing methods lack a principled probabilistic formulation. In this paper, we propose a clustering method based on a tree-structured graphical model to describe the generation process of mixed-type data. 
Our tree-structured model factorized into a product of pairwise interactions, and thus localizes the interaction between feature variables of different types. To provide a robust clustering method based on the tree-model, we adopt a topographical view and compute peaks of the density function and their attractive basins for clustering. Furthermore, we leverage the theory from topology data analysis to adaptively merge trivial peaks into large ones in order to achieve meaningful clusterings. Our method outperforms state-of-the-art methods on mixed-type data.}\n}", "pdf": "http://proceedings.mlr.press/v70/ni17a/ni17a.pdf", "supp": "", "pdf_size": 382507, "gs_citation": 25, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17879415139415999517&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "City University of New York (CUNY), New York, USA; University of Sussex, Falmer, United Kingdom+National Research University Higher School of Economics, Moscow, Russia; Ohio State University, Columbus, USA; City University of New York (CUNY), New York, USA", "aff_domain": "gmail.com; ; ; ", "email": "gmail.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/ni17a.html", "aff_unique_index": "0;1+2;3;0", "aff_unique_norm": "City University of New York;University of Sussex;National Research University Higher School of Economics;Ohio State University", "aff_unique_dep": ";;;", "aff_unique_url": "https://www.cuny.edu;https://www.sussex.ac.uk;https://www.hse.ru;https://www.osu.edu", "aff_unique_abbr": "CUNY;Sussex;HSE;OSU", "aff_campus_unique_index": "0;1+2;3;0", "aff_campus_unique": "New York;Falmer;Moscow;Columbus", "aff_country_unique_index": "0;1+2;0;0", "aff_country_unique": "United States;United Kingdom;Russian Federation" }, { "title": "Compressed Sensing using Generative Models", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/796", "id": "796", "author_site": "Ashish Bora, Ajil Jalal, Eric Price, Alexandros Dimakis", "author": "Ashish Bora; Ajil Jalal; Eric Price; Alexandros G. Dimakis", "abstract": "The goal of compressed sensing is to estimate a vector from an underdetermined system of noisy linear measurements, by making use of prior knowledge on the structure of vectors in the relevant domain. For almost all results in this literature, the structure is represented by sparsity in a well-chosen basis. We show how to achieve guarantees similar to standard compressed sensing but without employing sparsity at all. Instead, we suppose that vectors lie near the range of a generative model $G: \\mathbb{R}^k \\to \\mathbb{R}^n$. Our main theorem is that, if $G$ is $L$-Lipschitz, then roughly $\\mathcal{O}(k \\log L)$ random Gaussian measurements suffice for an $\\ell_2/\\ell_2$ recovery guarantee. We demonstrate our results using generative models from published variational autoencoder and generative adversarial networks. Our method can use $5$-$10$x fewer measurements than Lasso for the same accuracy.", "bibtex": "@InProceedings{pmlr-v70-bora17a,\n title = \t {Compressed Sensing using Generative Models},\n author = {Ashish Bora and Ajil Jalal and Eric Price and Alexandros G. 
Dimakis},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {537--546},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/bora17a/bora17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/bora17a.html},\n abstract = \t {The goal of compressed sensing is to estimate a vector from an underdetermined system of noisy linear measurements, by making use of prior knowledge on the structure of vectors in the relevant domain. For almost all results in this literature, the structure is represented by sparsity in a well-chosen basis. We show how to achieve guarantees similar to standard compressed sensing but without employing sparsity at all. Instead, we suppose that vectors lie near the range of a generative model $G: \\mathbb{R}^k \\to \\mathbb{R}^n$. Our main theorem is that, if $G$ is $L$-Lipschitz, then roughly $\\mathcal{O}(k \\log L)$ random Gaussian measurements suffice for an $\\ell_2/\\ell_2$ recovery guarantee. We demonstrate our results using generative models from published variational autoencoder and generative adversarial networks. Our method can use $5$-$10$x fewer measurements than Lasso for the same accuracy.}\n}", "pdf": "http://proceedings.mlr.press/v70/bora17a/bora17a.pdf", "supp": "", "pdf_size": 5071784, "gs_citation": 1019, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14786655061233662443&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "University of Texas at Austin, Department of Computer Science; University of Texas at Austin, Department of Electrical and Computer Engineering; University of Texas at Austin, Department of Computer Science; University of Texas at Austin, Department of Electrical and Computer Engineering", "aff_domain": "utexas.edu;utexas.edu;cs.utexas.edu;austin.utexas.edu", "email": "utexas.edu;utexas.edu;cs.utexas.edu;austin.utexas.edu", "github": "https://github.com/AshishBora/csgm1", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/bora17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Texas at Austin", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.utexas.edu", "aff_unique_abbr": "UT Austin", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Austin", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Conditional Accelerated Lazy Stochastic Gradient Descent", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/510", "id": "510", "author_site": "Guanghui , Sebastian Pokutta, Yi Zhou, Daniel Zink", "author": "Guanghui Lan; Sebastian Pokutta; Yi Zhou; Daniel Zink", "abstract": "In this work we introduce a conditional accelerated lazy stochastic gradient descent algorithm with optimal number of calls to a stochastic first-order oracle and convergence rate $O(1/\\epsilon^2)$ improving over the projection-free, Online Frank-Wolfe based stochastic gradient descent of (Hazan and Kale, 2012) with convergence rate $O(1/\\epsilon^4)$.", "bibtex": "@InProceedings{pmlr-v70-lan17a,\n title = \t {Conditional Accelerated Lazy Stochastic Gradient Descent},\n author = {Guanghui Lan and Sebastian Pokutta and Yi Zhou and Daniel Zink},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t 
{1965--1974},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/lan17a/lan17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/lan17a.html},\n abstract = \t {In this work we introduce a conditional accelerated lazy stochastic gradient descent algorithm with optimal number of calls to a stochastic first-order oracle and convergence rate $O(1/\\epsilon^2)$ improving over the projection-free, Online Frank-Wolfe based stochastic gradient descent of (Hazan and Kale, 2012) with convergence rate $O(1/\\epsilon^4)$.}\n}", "pdf": "http://proceedings.mlr.press/v70/lan17a/lan17a.pdf", "supp": "", "pdf_size": 2867946, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17732158229315649141&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "ISyE, Georgia Institute of Technology; ISyE, Georgia Institute of Technology; ISyE, Georgia Institute of Technology; ISyE, Georgia Institute of Technology", "aff_domain": "gatech.edu; ; ; ", "email": "gatech.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/lan17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Georgia Institute of Technology", "aff_unique_dep": "Industrial and Systems Engineering", "aff_unique_url": "https://www.gatech.edu", "aff_unique_abbr": "Georgia Tech", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Atlanta", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Conditional Image Synthesis with Auxiliary Classifier GANs", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/792", "id": "792", "author_site": "Augustus Odena, Christopher Olah, Jon Shlens", "author": "Augustus Odena; Christopher Olah; Jonathon Shlens", "abstract": "In this paper we introduce new methods for the improved training of generative adversarial networks (GANs) for image synthesis. We construct a variant of GANs employing label conditioning that results in $128\\times 128$ resolution image samples exhibiting global coherence. We expand on previous work for image quality assessment to provide two new analyses for assessing the discriminability and diversity of samples from class-conditional image synthesis models. These analyses demonstrate that high resolution samples provide class information not present in low resolution samples. Across 1000 ImageNet classes, $128\\times 128$ samples are more than twice as discriminable as artificially resized $32\\times 32$ samples. 
In addition, 84.7\\% of the classes have samples exhibiting diversity comparable to real ImageNet data.", "bibtex": "@InProceedings{pmlr-v70-odena17a,\n title = \t {Conditional Image Synthesis with Auxiliary Classifier {GAN}s},\n author = {Augustus Odena and Christopher Olah and Jonathon Shlens},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2642--2651},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/odena17a/odena17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/odena17a.html},\n abstract = \t {In this paper we introduce new methods for the improved training of generative adversarial networks (GANs) for image synthesis. We construct a variant of GANs employing label conditioning that results in $128\\times 128$ resolution image samples exhibiting global coherence. We expand on previous work for image quality assessment to provide two new analyses for assessing the discriminability and diversity of samples from class-conditional image synthesis models. These analyses demonstrate that high resolution samples provide class information not present in low resolution samples. Across 1000 ImageNet classes, $128\\times 128$ samples are more than twice as discriminable as artificially resized $32\\times 32$ samples. In addition, 84.7\\% of the classes have samples exhibiting diversity comparable to real ImageNet data.}\n}", "pdf": "http://proceedings.mlr.press/v70/odena17a/odena17a.pdf", "supp": "", "pdf_size": 1493525, "gs_citation": 4533, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14828291299960415366&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Google Brain; Google Brain; Google Brain", "aff_domain": "google.com; ; ", "email": "google.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/odena17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Brain", "aff_unique_url": "https://brain.google.com", "aff_unique_abbr": "Google Brain", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Confident Multiple Choice Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/693", "id": "693", "author_site": "Kimin Lee, Changho Hwang, KyoungSoo Park, Jinwoo Shin", "author": "Kimin Lee; Changho Hwang; KyoungSoo Park; Jinwoo Shin", "abstract": "Ensemble methods are arguably the most trustworthy techniques for boosting the performance of machine learning models. Popular independent ensembles (IE) relying on a naive averaging/voting scheme have been the typical choice for most applications involving deep neural networks, but they do not consider advanced collaboration among ensemble models. In this paper, we propose new ensemble methods specialized for deep neural networks, called confident multiple choice learning (CMCL): it is a variant of multiple choice learning (MCL) via addressing its overconfidence issue. In particular, the proposed major components of CMCL beyond the original MCL scheme are (i) new loss, i.e., confident oracle loss, (ii) new architecture, i.e., feature sharing and (iii) new training method, i.e., stochastic labeling. 
We demonstrate the effect of CMCL via experiments on the image classification on CIFAR and SVHN, and the foreground-background segmentation on the iCoseg. In particular, CMCL using 5 residual networks provides 14.05\\% and 6.60\\% relative reductions in the top-1 error rates from the corresponding IE scheme for the classification task on CIFAR and SVHN, respectively.", "bibtex": "@InProceedings{pmlr-v70-lee17b,\n title = \t {Confident Multiple Choice Learning},\n author = {Kimin Lee and Changho Hwang and KyoungSoo Park and Jinwoo Shin},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2014--2023},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/lee17b/lee17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/lee17b.html},\n abstract = \t {Ensemble methods are arguably the most trustworthy techniques for boosting the performance of machine learning models. Popular independent ensembles (IE) relying on naive averaging/voting scheme have been of typical choice for most applications involving deep neural networks, but they do not consider advanced collaboration among ensemble models. In this paper, we propose new ensemble methods specialized for deep neural networks, called confident multiple choice learning (CMCL): it is a variant of multiple choice learning (MCL) via addressing its overconfidence issue.In particular, the proposed major components of CMCL beyond the original MCL scheme are (i) new loss, i.e., confident oracle loss, (ii) new architecture, i.e., feature sharing and (iii) new training method, i.e., stochastic labeling. We demonstrate the effect of CMCL via experiments on the image classification on CIFAR and SVHN, and the foreground-background segmentation on the iCoseg. 
In particular, CMCL using 5 residual networks provides 14.05\\% and 6.60\\% relative reductions in the top-1 error rates from the corresponding IE scheme for the classification task on CIFAR and SVHN, respectively.}\n}", "pdf": "http://proceedings.mlr.press/v70/lee17b/lee17b.pdf", "supp": "", "pdf_size": 937011, "gs_citation": 66, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3332274487139592248&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "School of Electrical Engineering, Korea Advanced Institute of Science and Technology (KAIST), Daejeon, Republic of Korea; School of Electrical Engineering, Korea Advanced Institute of Science and Technology (KAIST), Daejeon, Republic of Korea; School of Electrical Engineering, Korea Advanced Institute of Science and Technology (KAIST), Daejeon, Republic of Korea; School of Electrical Engineering, Korea Advanced Institute of Science and Technology (KAIST), Daejeon, Republic of Korea", "aff_domain": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", "email": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/lee17b.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Korea Advanced Institute of Science and Technology", "aff_unique_dep": "School of Electrical Engineering", "aff_unique_url": "https://www.kaist.ac.kr", "aff_unique_abbr": "KAIST", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Daejeon", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "South Korea" }, { "title": "Connected Subgraph Detection with Mirror Descent on SDPs", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/873", "id": "873", "author_site": "Cem Aksoylar, Orecchia Lorenzo, Venkatesh Saligrama", "author": "Cem Aksoylar; Lorenzo Orecchia; Venkatesh Saligrama", "abstract": "We propose a novel, computationally efficient mirror-descent based optimization framework for subgraph detection in graph-structured data. Our aim is to discover anomalous patterns present in a connected subgraph of a given graph. This problem arises in many applications such as detection of network intrusions, community detection, detection of anomalous events in surveillance videos or disease outbreaks. Since optimization over connected subgraphs is a combinatorial and computationally difficult problem, we propose a convex relaxation that offers a principled approach to incorporating connectivity and conductance constraints on candidate subgraphs.
We develop a novel efficient algorithm to solve the relaxed problem, establish convergence guarantees and demonstrate its feasibility and performance with experiments on real and very large simulated networks.", "bibtex": "@InProceedings{pmlr-v70-aksoylar17a,\n title = \t {Connected Subgraph Detection with Mirror Descent on {SDP}s},\n author = {Cem Aksoylar and Lorenzo Orecchia and Venkatesh Saligrama},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {51--59},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/aksoylar17a/aksoylar17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/aksoylar17a.html},\n abstract = \t {We propose a novel, computationally efficient mirror-descent based optimization framework for subgraph detection in graph-structured data. Our aim is to discover anomalous patterns present in a connected subgraph of a given graph. This problem arises in many applications such as detection of network intrusions, community detection, detection of anomalous events in surveillance videos or disease outbreaks. Since optimization over connected subgraphs is a combinatorial and computationally difficult problem, we propose a convex relaxation that offers a principled approach to incorporating connectivity and conductance constraints on candidate subgraphs. We develop a novel efficient algorithm to solve the relaxed problem, establish convergence guarantees and demonstrate its feasibility and performance with experiments on real and very large simulated networks.}\n}", "pdf": "http://proceedings.mlr.press/v70/aksoylar17a/aksoylar17a.pdf", "supp": "", "pdf_size": 628969, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1341197433270090430&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Boston University; Boston University; Boston University", "aff_domain": "bu.edu;bu.edu;bu.edu", "email": "bu.edu;bu.edu;bu.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/aksoylar17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Boston University", "aff_unique_dep": "", "aff_unique_url": "https://www.bu.edu", "aff_unique_abbr": "BU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Consistency Analysis for Binary Classification Revisited", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/787", "id": "787", "author_site": "Krzysztof Dembczynski, Wojciech Kotlowski, Sanmi Koyejo, Nagarajan Natarajan", "author": "Krzysztof Dembczy\u0144ski; Wojciech Kot\u0142owski; Oluwasanmi Koyejo; Nagarajan Natarajan", "abstract": "Statistical learning theory is at an inflection point enabled by recent advances in understanding and optimizing a wide range of metrics. Of particular interest are non-decomposable metrics such as the F-measure and the Jaccard measure which cannot be represented as a simple average over examples. Non-decomposability is the primary source of difficulty in theoretical analysis, and interestingly has led to two distinct settings and notions of consistency. 
In this manuscript we analyze both settings, from statistical and algorithmic points of view, to explore the connections and to highlight differences between them for a wide range of metrics. The analysis complements previous results on this topic, clarifies common confusions around both settings, and provides guidance to the theory and practice of binary classification with complex metrics.", "bibtex": "@InProceedings{pmlr-v70-dembczynski17a,\n title = \t {Consistency Analysis for Binary Classification Revisited},\n author = {Krzysztof Dembczy{\\'{n}}ski and Wojciech Kot{\\l}owski and Oluwasanmi Koyejo and Nagarajan Natarajan},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {961--969},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/dembczynski17a/dembczynski17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/dembczynski17a.html},\n abstract = \t {Statistical learning theory is at an inflection point enabled by recent advances in understanding and optimizing a wide range of metrics. Of particular interest are non-decomposable metrics such as the F-measure and the Jaccard measure which cannot be represented as a simple average over examples. Non-decomposability is the primary source of difficulty in theoretical analysis, and interestingly has led to two distinct settings and notions of consistency. In this manuscript we analyze both settings, from statistical and algorithmic points of view, to explore the connections and to highlight differences between them for a wide range of metrics. The analysis complements previous results on this topic, clarifies common confusions around both settings, and provides guidance to the theory and practice of binary classification with complex metrics.}\n}", "pdf": "http://proceedings.mlr.press/v70/dembczynski17a/dembczynski17a.pdf", "supp": "", "pdf_size": 403751, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11599296769382889651&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Institute of Computing Science, Poznan University of Technology, Poland; Institute of Computing Science, Poznan University of Technology, Poland; Department of Computer Science, University of Illinois at Urbana-Champaign, USA; Microsoft Research, India", "aff_domain": "cs.put.poznan.pl; ; ; ", "email": "cs.put.poznan.pl; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/dembczynski17a.html", "aff_unique_index": "0;0;1;2", "aff_unique_norm": "Poznan University of Technology;University of Illinois Urbana-Champaign;Microsoft", "aff_unique_dep": "Institute of Computing Science;Department of Computer Science;Microsoft Research", "aff_unique_url": "https://www.put.poznan.pl/;https://illinois.edu;https://www.microsoft.com/en-us/research/group/india.aspx", "aff_unique_abbr": "PUT;UIUC;MSR India", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Poznan;Urbana-Champaign;", "aff_country_unique_index": "0;0;1;2", "aff_country_unique": "Poland;United States;India" }, { "title": "Consistent On-Line Off-Policy Evaluation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/480", "id": "480", "author_site": "Assaf Hallak, Shie Mannor", "author": "Assaf Hallak; Shie Mannor", "abstract": "The problem of on-line off-policy 
evaluation (OPE) has been actively studied in the last decade due to its importance both as a stand-alone problem and as a module in a policy improvement scheme. However, most Temporal Difference (TD) based solutions ignore the discrepancy between the stationary distribution of the behavior and target policies and its effect on the convergence limit when function approximation is applied. In this paper we propose the Consistent Off-Policy Temporal Difference (COP-TD($\\lambda$, $\\beta$)) algorithm that addresses this issue and reduces this bias at some computational expense. We show that COP-TD($\\lambda$, $\\beta$) can be designed to converge to the same value that would have been obtained by using on-policy TD($\\lambda$) with the target policy. Subsequently, the proposed scheme leads to a related and promising heuristic we call log-COP-TD($\\lambda$, $\\beta$). Both algorithms have favorable empirical results to the current state of the art on-line OPE algorithms. Finally, our formulation sheds some new light on the recently proposed Emphatic TD learning.", "bibtex": "@InProceedings{pmlr-v70-hallak17a,\n title = \t {Consistent On-Line Off-Policy Evaluation},\n author = {Assaf Hallak and Shie Mannor},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1372--1383},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/hallak17a/hallak17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/hallak17a.html},\n abstract = \t {The problem of on-line off-policy evaluation (OPE) has been actively studied in the last decade due to its importance both as a stand-alone problem and as a module in a policy improvement scheme. However, most Temporal Difference (TD) based solutions ignore the discrepancy between the stationary distribution of the behavior and target policies and its effect on the convergence limit when function approximation is applied. In this paper we propose the Consistent Off-Policy Temporal Difference (COP-TD($\\lambda$, $\\beta$)) algorithm that addresses this issue and reduces this bias at some computational expense. We show that COP-TD($\\lambda$, $\\beta$) can be designed to converge to the same value that would have been obtained by using on-policy TD($\\lambda$) with the target policy. Subsequently, the proposed scheme leads to a related and promising heuristic we call log-COP-TD($\\lambda$, $\\beta$). Both algorithms have favorable empirical results to the current state of the art on-line OPE algorithms. 
Finally, our formulation sheds some new light on the recently proposed Emphatic TD learning.}\n}", "pdf": "http://proceedings.mlr.press/v70/hallak17a/hallak17a.pdf", "supp": "", "pdf_size": 1448169, "gs_citation": 116, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3623586126269579774&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "The Technion, Haifa, Israel; The Technion, Haifa, Israel", "aff_domain": "gmail.com;ee.technion.ac.il", "email": "gmail.com;ee.technion.ac.il", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/hallak17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Technion", "aff_unique_dep": "", "aff_unique_url": "http://www.technion.ac.il", "aff_unique_abbr": "Technion", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Haifa", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "title": "Consistent k-Clustering", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/588", "id": "588", "author_site": "Silvio Lattanzi, Sergei Vassilvitskii", "author": "Silvio Lattanzi; Sergei Vassilvitskii", "abstract": "The study of online algorithms and competitive analysis provides a solid foundation for studying the quality of irrevocable decision making when the data arrives in an online manner. While in some scenarios the decisions are indeed irrevocable, there are many practical situations when changing a previous decision is not impossible, but simply expensive. In this work we formalize this notion and introduce the consistent k-clustering problem. With points arriving online, the goal is to maintain a constant approximate solution, while minimizing the number of reclusterings necessary. We prove a lower bound, showing that $\\Omega(k \\log n)$ changes are necessary in the worst case for a wide range of objective functions. On the positive side, we give an algorithm that needs only $O(k^2 \\log^4n)$ changes to maintain a constant competitive solution, an exponential improvement on the naive solution of reclustering at every time step. Finally, we show experimentally that our approach performs much better than the theoretical bound, with the number of changes growing approximately as $O(\\log n)$.", "bibtex": "@InProceedings{pmlr-v70-lattanzi17a,\n title = \t {Consistent k-Clustering},\n author = {Silvio Lattanzi and Sergei Vassilvitskii},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1975--1984},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/lattanzi17a/lattanzi17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/lattanzi17a.html},\n abstract = \t {The study of online algorithms and competitive analysis provides a solid foundation for studying the quality of irrevocable decision making when the data arrives in an online manner. While in some scenarios the decisions are indeed irrevocable, there are many practical situations when changing a previous decision is not impossible, but simply expensive. In this work we formalize this notion and introduce the consistent k-clustering problem. With points arriving online, the goal is to maintain a constant approximate solution, while minimizing the number of reclusterings necessary. 
We prove a lower bound, showing that $\\Omega(k \\log n)$ changes are necessary in the worst case for a wide range of objective functions. On the positive side, we give an algorithm that needs only $O(k^2 \\log^4n)$ changes to maintain a constant competitive solution, an exponential improvement on the naive solution of reclustering at every time step. Finally, we show experimentally that our approach performs much better than the theoretical bound, with the number of changes growing approximately as $O(\\log n)$.}\n}", "pdf": "http://proceedings.mlr.press/v70/lattanzi17a/lattanzi17a.pdf", "supp": "", "pdf_size": 811137, "gs_citation": 51, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6189071009587453702&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Google, Zurich, Switzerland; Google, New York, New York, USA", "aff_domain": "google.com;google.com", "email": "google.com;google.com", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/lattanzi17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google", "aff_unique_url": "https://www.google.ch", "aff_unique_abbr": "Google", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Zurich;New York", "aff_country_unique_index": "0;1", "aff_country_unique": "Switzerland;United States" }, { "title": "Constrained Policy Optimization", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/848", "id": "848", "author_site": "Joshua Achiam, David Held, Aviv Tamar, Pieter Abbeel", "author": "Joshua Achiam; David Held; Aviv Tamar; Pieter Abbeel", "abstract": "For many applications of reinforcement learning it can be more convenient to specify both a reward function and constraints, rather than trying to design behavior through the reward function. For example, systems that physically interact with or around humans should satisfy safety constraints. Recent advances in policy search algorithms (Mnih et al., 2016, Schulman et al., 2015, Lillicrap et al., 2016, Levine et al., 2016) have enabled new capabilities in high-dimensional control, but do not consider the constrained setting. We propose Constrained Policy Optimization (CPO), the first general-purpose policy search algorithm for constrained reinforcement learning with guarantees for near-constraint satisfaction at each iteration. Our method allows us to train neural network policies for high-dimensional control while making guarantees about policy behavior all throughout training. Our guarantees are based on a new theoretical result, which is of independent interest: we prove a bound relating the expected returns of two policies to an average divergence between them. 
We demonstrate the effectiveness of our approach on simulated robot locomotion tasks where the agent must satisfy constraints motivated by safety.", "bibtex": "@InProceedings{pmlr-v70-achiam17a,\n title = \t {Constrained Policy Optimization},\n author = {Joshua Achiam and David Held and Aviv Tamar and Pieter Abbeel},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {22--31},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/achiam17a/achiam17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/achiam17a.html},\n abstract = \t {For many applications of reinforcement learning it can be more convenient to specify both a reward function and constraints, rather than trying to design behavior through the reward function. For example, systems that physically interact with or around humans should satisfy safety constraints. Recent advances in policy search algorithms (Mnih et al., 2016, Schulman et al., 2015, Lillicrap et al., 2016, Levine et al., 2016) have enabled new capabilities in high-dimensional control, but do not consider the constrained setting. We propose Constrained Policy Optimization (CPO), the first general-purpose policy search algorithm for constrained reinforcement learning with guarantees for near-constraint satisfaction at each iteration. Our method allows us to train neural network policies for high-dimensional control while making guarantees about policy behavior all throughout training. Our guarantees are based on a new theoretical result, which is of independent interest: we prove a bound relating the expected returns of two policies to an average divergence between them. We demonstrate the effectiveness of our approach on simulated robot locomotion tasks where the agent must satisfy constraints motivated by safety.}\n}", "pdf": "http://proceedings.mlr.press/v70/achiam17a/achiam17a.pdf", "supp": "", "pdf_size": 944800, "gs_citation": 1815, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6114366704163518185&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "UC Berkeley; UC Berkeley; UC Berkeley; UC Berkeley+OpenAI", "aff_domain": "berkeley.edu; ; ; ", "email": "berkeley.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/achiam17a.html", "aff_unique_index": "0;0;0;0+1", "aff_unique_norm": "University of California, Berkeley;OpenAI", "aff_unique_dep": ";", "aff_unique_url": "https://www.berkeley.edu;https://openai.com", "aff_unique_abbr": "UC Berkeley;OpenAI", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Berkeley;", "aff_country_unique_index": "0;0;0;0+0", "aff_country_unique": "United States" }, { "title": "Contextual Decision Processes with low Bellman rank are PAC-Learnable", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/610", "id": "610", "author_site": "Nan Jiang, Akshay Krishnamurthy, Alekh Agarwal, John Langford, Robert Schapire", "author": "Nan Jiang; Akshay Krishnamurthy; Alekh Agarwal; John Langford; Robert E. Schapire", "abstract": "This paper studies systematic exploration for reinforcement learning (RL) with rich observations and function approximation. We introduce contextual decision processes (CDPs), that unify most prior RL settings. 
Our first contribution is a complexity measure, the Bellman rank, that we show enables tractable learning of near-optimal behavior in CDPs and is naturally small for many well-studied RL models. Our second contribution is a new RL algorithm that does systematic exploration to learn near-optimal behavior in CDPs with low Bellman rank. The algorithm requires a number of samples that is polynomial in all relevant parameters but independent of the number of unique contexts. Our approach uses Bellman error minimization with optimistic exploration and provides new insights into efficient exploration for RL with function approximation.", "bibtex": "@InProceedings{pmlr-v70-jiang17c,\n title = \t {Contextual Decision Processes with low {B}ellman rank are {PAC}-Learnable},\n author = {Nan Jiang and Akshay Krishnamurthy and Alekh Agarwal and John Langford and Robert E. Schapire},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1704--1713},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/jiang17c/jiang17c.pdf},\n url = \t {https://proceedings.mlr.press/v70/jiang17c.html},\n abstract = \t {This paper studies systematic exploration for reinforcement learning (RL) with rich observations and function approximation. We introduce contextual decision processes (CDPs), that unify most prior RL settings. Our first contribution is a complexity measure, the Bellman rank, that we show enables tractable learning of near-optimal behavior in CDPs and is naturally small for many well-studied RL models. Our second contribution is a new RL algorithm that does systematic exploration to learn near-optimal behavior in CDPs with low Bellman rank. The algorithm requires a number of samples that is polynomial in all relevant parameters but independent of the number of unique contexts. 
Our approach uses Bellman error minimization with optimistic exploration and provides new insights into efficient exploration for RL with function approximation.}\n}", "pdf": "http://proceedings.mlr.press/v70/jiang17c/jiang17c.pdf", "supp": "", "pdf_size": 370609, "gs_citation": 528, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17882848604131569807&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "University of Michigan, Ann Arbor; University of Massachusetts, Amherst; Microsoft Research, New York; Microsoft Research, New York; Microsoft Research, New York", "aff_domain": "umich.edu; ; ; ; ", "email": "umich.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/jiang17c.html", "aff_unique_index": "0;1;2;2;2", "aff_unique_norm": "University of Michigan;University of Massachusetts Amherst;Microsoft", "aff_unique_dep": ";;Microsoft Research", "aff_unique_url": "https://www.umich.edu;https://www.umass.edu;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "UM;UMass Amherst;MSR", "aff_campus_unique_index": "0;1;2;2;2", "aff_campus_unique": "Ann Arbor;Amherst;New York", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Continual Learning Through Synaptic Intelligence", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/598", "id": "598", "author_site": "Friedemann Zenke, Ben Poole, Surya Ganguli", "author": "Friedemann Zenke; Ben Poole; Surya Ganguli", "abstract": "While deep learning has led to remarkable advances across diverse applications, it struggles in domains where the data distribution changes over the course of learning. In stark contrast, biological neural networks continually adapt to changing domains, possibly by leveraging complex molecular machinery to solve many tasks simultaneously. In this study, we introduce intelligent synapses that bring some of this biological complexity into artificial neural networks. Each synapse accumulates task relevant information over time, and exploits this information to rapidly store new memories without forgetting old ones. We evaluate our approach on continual learning of classification tasks, and show that it dramatically reduces forgetting while maintaining computational efficiency.", "bibtex": "@InProceedings{pmlr-v70-zenke17a,\n title = \t {Continual Learning Through Synaptic Intelligence},\n author = {Friedemann Zenke and Ben Poole and Surya Ganguli},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3987--3995},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zenke17a/zenke17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/zenke17a.html},\n abstract = \t {While deep learning has led to remarkable advances across diverse applications, it struggles in domains where the data distribution changes over the course of learning. In stark contrast, biological neural networks continually adapt to changing domains, possibly by leveraging complex molecular machinery to solve many tasks simultaneously. In this study, we introduce intelligent synapses that bring some of this biological complexity into artificial neural networks. 
Each synapse accumulates task relevant information over time, and exploits this information to rapidly store new memories without forgetting old ones. We evaluate our approach on continual learning of classification tasks, and show that it dramatically reduces forgetting while maintaining computational efficiency.}\n}", "pdf": "http://proceedings.mlr.press/v70/zenke17a/zenke17a.pdf", "supp": "", "pdf_size": 928409, "gs_citation": 3346, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13353907805622310554&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Stanford University; Stanford University; Stanford University", "aff_domain": "stanford.edu;cs.stanford.edu; ", "email": "stanford.edu;cs.stanford.edu; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/zenke17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Convergence Analysis of Proximal Gradient with Momentum for Nonconvex Optimization", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/837", "id": "837", "author_site": "Qunwei Li, Yi Zhou, Yingbin Liang, Pramod K Varshney", "author": "Qunwei Li; Yi Zhou; Yingbin Liang; Pramod K. Varshney", "abstract": "In this work, we investigate the accelerated proximal gradient method for nonconvex programming (APGnc). The method compares between a usual proximal gradient step and a linear extrapolation step, and accepts the one that has a lower function value to achieve a monotonic decrease. In specific, under a general nonsmooth and nonconvex setting, we provide a rigorous argument to show that the limit points of the sequence generated by APGnc are critical points of the objective function. Then, by exploiting the Kurdyka-Lojasiewicz (KL) property for a broad class of functions, we establish the linear and sub-linear convergence rates of the function value sequence generated by APGnc. We further propose a stochastic variance reduced APGnc (SVRG-APGnc), and establish its linear convergence under a special case of the KL property. We also extend the analysis to the inexact version of these methods and develop an adaptive momentum strategy that improves the numerical performance.", "bibtex": "@InProceedings{pmlr-v70-li17g,\n title = \t {Convergence Analysis of Proximal Gradient with Momentum for Nonconvex Optimization},\n author = {Qunwei Li and Yi Zhou and Yingbin Liang and Pramod K. Varshney},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2111--2119},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/li17g/li17g.pdf},\n url = \t {https://proceedings.mlr.press/v70/li17g.html},\n abstract = \t {In this work, we investigate the accelerated proximal gradient method for nonconvex programming (APGnc). The method compares between a usual proximal gradient step and a linear extrapolation step, and accepts the one that has a lower function value to achieve a monotonic decrease. 
In specific, under a general nonsmooth and nonconvex setting, we provide a rigorous argument to show that the limit points of the sequence generated by APGnc are critical points of the objective function. Then, by exploiting the Kurdyka-Lojasiewicz (KL) property for a broad class of functions, we establish the linear and sub-linear convergence rates of the function value sequence generated by APGnc. We further propose a stochastic variance reduced APGnc (SVRG-APGnc), and establish its linear convergence under a special case of the KL property. We also extend the analysis to the inexact version of these methods and develop an adaptive momentum strategy that improves the numerical performance.}\n}", "pdf": "http://proceedings.mlr.press/v70/li17g/li17g.pdf", "supp": "", "pdf_size": 321588, "gs_citation": 106, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2557469997527111313&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Syracuse University, NY, USA; Syracuse University, NY, USA; Syracuse University, NY, USA; Syracuse University, NY, USA", "aff_domain": "syr.edu; ; ; ", "email": "syr.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/li17g.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Syracuse University", "aff_unique_dep": "", "aff_unique_url": "https://www.syracuse.edu", "aff_unique_abbr": "Syracuse", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "NY", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Convex Phase Retrieval without Lifting via PhaseMax", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/636", "id": "636", "author_site": "Tom Goldstein, Christoph Studer", "author": "Tom Goldstein; Christoph Studer", "abstract": "Semidefinite relaxation methods transform a variety of non-convex optimization problems into convex problems, but square the number of variables. We study a new type of convex relaxation for phase retrieval problems, called PhaseMax, that convexifies the underlying problem without lifting. The resulting problem formulation can be solved using standard convex optimization routines, while still working in the original, low-dimensional variable space. We prove, using a random spherical distribution measurement model, that PhaseMax succeeds with high probability for a sufficiently large number of measurements. We compare our approach to other phase retrieval methods and demonstrate that our theory accurately predicts the success of PhaseMax.", "bibtex": "@InProceedings{pmlr-v70-goldstein17a,\n title = \t {Convex Phase Retrieval without Lifting via {P}hase{M}ax},\n author = {Tom Goldstein and Christoph Studer},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1273--1281},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/goldstein17a/goldstein17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/goldstein17a.html},\n abstract = \t {Semidefinite relaxation methods transform a variety of non-convex optimization problems into convex problems, but square the number of variables. We study a new type of convex relaxation for phase retrieval problems, called PhaseMax, that convexifies the underlying problem without lifting. 
The resulting problem formulation can be solved using standard convex optimization routines, while still working in the original, low-dimensional variable space. We prove, using a random spherical distribution measurement model, that PhaseMax succeeds with high probability for a sufficiently large number of measurements. We compare our approach to other phase retrieval methods and demonstrate that our theory accurately predicts the success of PhaseMax.}\n}", "pdf": "http://proceedings.mlr.press/v70/goldstein17a/goldstein17a.pdf", "supp": "", "pdf_size": 554914, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5601489141172696358&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "University of Maryland; Cornell University", "aff_domain": "cs.umd.edu; ", "email": "cs.umd.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/goldstein17a.html", "aff_unique_index": "0;1", "aff_unique_norm": "University of Maryland;Cornell University", "aff_unique_dep": ";", "aff_unique_url": "https://www.umd.edu;https://www.cornell.edu", "aff_unique_abbr": "UMD;Cornell", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Convexified Convolutional Neural Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/540", "id": "540", "author_site": "Yuchen Zhang, Percy Liang, Martin Wainwright", "author": "Yuchen Zhang; Percy Liang; Martin J. Wainwright", "abstract": "We describe the class of convexified convolutional neural networks (CCNNs), which capture the parameter sharing of convolutional neural networks in a convex manner. By representing the nonlinear convolutional filters as vectors in a reproducing kernel Hilbert space, the CNN parameters can be represented as a low-rank matrix, which can be relaxed to obtain a convex optimization problem.
For learning two-layer convolutional neural networks, we prove that the generalization error obtained by a convexified CNN converges to that of the best possible CNN. For learning deeper networks, we train CCNNs in a layer-wise manner. Empirically, CCNNs achieve competitive or better performance than CNNs trained by backpropagation, SVMs, fully-connected neural networks, stacked denoising auto-encoders, and other baseline methods.}\n}", "pdf": "http://proceedings.mlr.press/v70/zhang17f/zhang17f.pdf", "supp": "", "pdf_size": 468922, "gs_citation": 94, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16200341034187935881&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Stanford University, CA, USA; Stanford University, CA, USA; University of California, Berkeley, CA, USA", "aff_domain": "cs.stanford.edu; ; ", "email": "cs.stanford.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/zhang17f.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "Stanford University;University of California, Berkeley", "aff_unique_dep": ";", "aff_unique_url": "https://www.stanford.edu;https://www.berkeley.edu", "aff_unique_abbr": "Stanford;UC Berkeley", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "California;Berkeley", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Convolutional Sequence to Sequence Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/806", "id": "806", "author_site": "Jonas Gehring, Michael Auli, David Grangier, Denis Yarats, Yann Dauphin", "author": "Jonas Gehring; Michael Auli; David Grangier; Denis Yarats; Yann N. Dauphin", "abstract": "The prevalent approach to sequence to sequence learning maps an input sequence to a variable length output sequence via recurrent neural networks. We introduce an architecture based entirely on convolutional neural networks. Compared to recurrent models, computations over all elements can be fully parallelized during training to better exploit the GPU hardware and optimization is easier since the number of non-linearities is fixed and independent of the input length. Our use of gated linear units eases gradient propagation and we equip each decoder layer with a separate attention module. We outperform the accuracy of the deep LSTM setup of Wu et al. (2016) on both WMT\u201914 English-German and WMT\u201914 English-French translation at an order of magnitude faster speed, both on GPU and CPU.", "bibtex": "@InProceedings{pmlr-v70-gehring17a,\n title = \t {Convolutional Sequence to Sequence Learning},\n author = {Jonas Gehring and Michael Auli and David Grangier and Denis Yarats and Yann N. Dauphin},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1243--1252},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/gehring17a/gehring17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/gehring17a.html},\n abstract = \t {The prevalent approach to sequence to sequence learning maps an input sequence to a variable length output sequence via recurrent neural networks. We introduce an architecture based entirely on convolutional neural networks. 
Compared to recurrent models, computations over all elements can be fully parallelized during training to better exploit the GPU hardware and optimization is easier since the number of non-linearities is fixed and independent of the input length. Our use of gated linear units eases gradient propagation and we equip each decoder layer with a separate attention module. We outperform the accuracy of the deep LSTM setup of Wu et al. (2016) on both WMT\u201914 English-German and WMT\u201914 English-French translation at an order of magnitude faster speed, both on GPU and CPU.}\n}", "pdf": "http://proceedings.mlr.press/v70/gehring17a/gehring17a.pdf", "supp": "", "pdf_size": 472288, "gs_citation": 4576, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9032432574575787905&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 8, "aff": "Facebook AI Research; Facebook AI Research; Facebook AI Research; Facebook AI Research; Facebook AI Research", "aff_domain": "fb.com;fb.com; ; ; ", "email": "fb.com;fb.com; ; ; ", "github": "https://github.com/facebookresearch/fairseq", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/gehring17a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Meta", "aff_unique_dep": "Facebook AI Research", "aff_unique_url": "https://research.facebook.com", "aff_unique_abbr": "FAIR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Coordinated Multi-Agent Imitation Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/621", "id": "621", "author_site": "Hoang Le, Yisong Yue, Peter Carr, Patrick Lucey", "author": "Hoang M. Le; Yisong Yue; Peter Carr; Patrick Lucey", "abstract": "We study the problem of imitation learning from demonstrations of multiple coordinating agents. One key challenge in this setting is that learning a good model of coordination can be difficult, since coordination is often implicit in the demonstrations and must be inferred as a latent variable. We propose a joint approach that simultaneously learns a latent coordination model along with the individual policies. In particular, our method integrates unsupervised structure learning with conventional imitation learning. We illustrate the power of our approach on a difficult problem of learning multiple policies for fine-grained behavior modeling in team sports, where different players occupy different roles in the coordinated team strategy. We show that having a coordination model to infer the roles of players yields substantially improved imitation loss compared to conventional baselines.", "bibtex": "@InProceedings{pmlr-v70-le17a,\n title = \t {Coordinated Multi-Agent Imitation Learning},\n author = {Hoang M. Le and Yisong Yue and Peter Carr and Patrick Lucey},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1995--2003},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/le17a/le17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/le17a.html},\n abstract = \t {We study the problem of imitation learning from demonstrations of multiple coordinating agents. 
One key challenge in this setting is that learning a good model of coordination can be difficult, since coordination is often implicit in the demonstrations and must be inferred as a latent variable. We propose a joint approach that simultaneously learns a latent coordination model along with the individual policies. In particular, our method integrates unsupervised structure learning with conventional imitation learning. We illustrate the power of our approach on a difficult problem of learning multiple policies for fine-grained behavior modeling in team sports, where different players occupy different roles in the coordinated team strategy. We show that having a coordination model to infer the roles of players yields substantially improved imitation loss compared to conventional baselines.}\n}", "pdf": "http://proceedings.mlr.press/v70/le17a/le17a.pdf", "supp": "", "pdf_size": 2131764, "gs_citation": 258, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17889694284157310653&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 15, "aff": "California Institute of Technology; California Institute of Technology; Disney Research; STATS LLC", "aff_domain": "caltech.edu; ; ; ", "email": "caltech.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/le17a.html", "aff_unique_index": "0;0;1;2", "aff_unique_norm": "California Institute of Technology;Disney Research;STATS LLC", "aff_unique_dep": ";;", "aff_unique_url": "https://www.caltech.edu;https://research.disney.com;https://www.stats.com", "aff_unique_abbr": "Caltech;Disney Research;STATS", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Pasadena;", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Coresets for Vector Summarization with Applications to Network Graphs", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/481", "id": "481", "author_site": "Dan Feldman, Sedat Ozer, Daniela Rus", "author": "Dan Feldman; Sedat Ozer; Daniela Rus", "abstract": "We provide a deterministic data summarization algorithm that approximates the mean $\\bar{p}=\\frac{1}{n}\\sum_{p\\in P} p$ of a set $P$ of $n$ vectors in $\\mathbb{R}^d$, by a weighted mean $\\tilde{p}$ of a", "bibtex": "@InProceedings{pmlr-v70-feldman17a,\n title = \t {Coresets for Vector Summarization with Applications to Network Graphs},\n author = {Dan Feldman and Sedat Ozer and Daniela Rus},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1117--1125},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/feldman17a/feldman17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/feldman17a.html},\n abstract = \t {We provide a deterministic data summarization algorithm that approximates the mean $\\bar{p}=\\frac{1}{n}\\sum_{p\\in P} p$ of a set $P$ of $n$ vectors in $\\mathbb{R}^d$, by a weighted mean $\\tilde{p}$ of a", "pdf": "http://proceedings.mlr.press/v70/feldman17a/feldman17a.pdf", "supp": "", "pdf_size": 555256, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9226608617114374431&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "University of Haifa, Israel; CSAIL, MIT; CSAIL, MIT", "aff_domain": "gmail.com; ; ", "email": "gmail.com; ; ", "github": "", "project": "", 
"author_num": 3, "oa": "https://proceedings.mlr.press/v70/feldman17a.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "University of Haifa;Massachusetts Institute of Technology", "aff_unique_dep": ";Computer Science and Artificial Intelligence Laboratory", "aff_unique_url": "https://www.haifa.ac.il;https://www.csail.mit.edu", "aff_unique_abbr": "UoH;MIT", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Cambridge", "aff_country_unique_index": "0;1;1", "aff_country_unique": "Israel;United States" }, { "title": "Cost-Optimal Learning of Causal Graphs", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/747", "id": "747", "author_site": "Murat Kocaoglu, Alexandros Dimakis, Sriram Vishwanath", "author": "Murat Kocaoglu; Alex Dimakis; Sriram Vishwanath", "abstract": "We consider the problem of learning a causal graph over a set of variables with interventions. We study the cost-optimal causal graph learning problem: For a given skeleton (undirected version of the causal graph), design the set of interventions with minimum total cost, that can uniquely identify any causal graph with the given skeleton. We show that this problem is solvable in polynomial time. Later, we consider the case when the number of interventions is limited. For this case, we provide polynomial time algorithms when the skeleton is a tree or a clique tree. For a general chordal skeleton, we develop an efficient greedy algorithm, which can be improved when the causal graph skeleton is an interval graph.", "bibtex": "@InProceedings{pmlr-v70-kocaoglu17a,\n title = \t {Cost-Optimal Learning of Causal Graphs},\n author = {Murat Kocaoglu and Alex Dimakis and Sriram Vishwanath},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1875--1884},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/kocaoglu17a/kocaoglu17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/kocaoglu17a.html},\n abstract = \t {We consider the problem of learning a causal graph over a set of variables with interventions. We study the cost-optimal causal graph learning problem: For a given skeleton (undirected version of the causal graph), design the set of interventions with minimum total cost, that can uniquely identify any causal graph with the given skeleton. We show that this problem is solvable in polynomial time. Later, we consider the case when the number of interventions is limited. For this case, we provide polynomial time algorithms when the skeleton is a tree or a clique tree. 
For a general chordal skeleton, we develop an efficient greedy algorithm, which can be improved when the causal graph skeleton is an interval graph.}\n}", "pdf": "http://proceedings.mlr.press/v70/kocaoglu17a/kocaoglu17a.pdf", "supp": "", "pdf_size": 534274, "gs_citation": 84, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17474594909251818929&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "The University of Texas at Austin; The University of Texas at Austin; The University of Texas at Austin", "aff_domain": "utexas.edu; ; ", "email": "utexas.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/kocaoglu17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Texas at Austin", "aff_unique_dep": "", "aff_unique_url": "https://www.utexas.edu", "aff_unique_abbr": "UT Austin", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Austin", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Count-Based Exploration with Neural Density Models", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/839", "id": "839", "author_site": "Georg Ostrovski, Marc Bellemare, A\u00e4ron van den Oord, Remi Munos", "author": "Georg Ostrovski; Marc G. Bellemare; A\u00e4ron Oord; R\u00e9mi Munos", "abstract": "Bellemare et al. (2016) introduced the notion of a pseudo-count, derived from a density model, to generalize count-based exploration to non-tabular reinforcement learning. This pseudo-count was used to generate an exploration bonus for a DQN agent and combined with a mixed Monte Carlo update was sufficient to achieve state of the art on the Atari 2600 game Montezuma\u2019s Revenge. We consider two questions left open by their work: First, how important is the quality of the density model for exploration? Second, what role does the Monte Carlo update play in exploration? We answer the first question by demonstrating the use of PixelCNN, an advanced neural density model for images, to supply a pseudo-count. In particular, we examine the intrinsic difficulties in adapting Bellemare et al.\u2019s approach when assumptions about the model are violated. The result is a more practical and general algorithm requiring no special apparatus. We combine PixelCNN pseudo-counts with different agent architectures to dramatically improve the state of the art on several hard Atari games. One surprising finding is that the mixed Monte Carlo update is a powerful facilitator of exploration in the sparsest of settings, including Montezuma\u2019s Revenge.", "bibtex": "@InProceedings{pmlr-v70-ostrovski17a,\n title = \t {Count-Based Exploration with Neural Density Models},\n author = {Georg Ostrovski and Marc G. Bellemare and A{\\\"a}ron van den Oord and R{\\'e}mi Munos},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2721--2730},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/ostrovski17a/ostrovski17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/ostrovski17a.html},\n abstract = \t {Bellemare et al. (2016) introduced the notion of a pseudo-count, derived from a density model, to generalize count-based exploration to non-tabular reinforcement learning. 
This pseudo-count was used to generate an exploration bonus for a DQN agent and combined with a mixed Monte Carlo update was sufficient to achieve state of the art on the Atari 2600 game Montezuma\u2019s Revenge. We consider two questions left open by their work: First, how important is the quality of the density model for exploration? Second, what role does the Monte Carlo update play in exploration? We answer the first question by demonstrating the use of PixelCNN, an advanced neural density model for images, to supply a pseudo-count. In particular, we examine the intrinsic difficulties in adapting Bellemare et al.\u2019s approach when assumptions about the model are violated. The result is a more practical and general algorithm requiring no special apparatus. We combine PixelCNN pseudo-counts with different agent architectures to dramatically improve the state of the art on several hard Atari games. One surprising finding is that the mixed Monte Carlo update is a powerful facilitator of exploration in the sparsest of settings, including Montezuma\u2019s Revenge.}\n}", "pdf": "http://proceedings.mlr.press/v70/ostrovski17a/ostrovski17a.pdf", "supp": "", "pdf_size": 1101995, "gs_citation": 806, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7236095966352642924&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK", "aff_domain": "google.com; ; ; ", "email": "google.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/ostrovski17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Counterfactual Data-Fusion for Online Reinforcement Learners", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/872", "id": "872", "author_site": "Andrew Forney, Judea Pearl, Elias Bareinboim", "author": "Andrew Forney; Judea Pearl; Elias Bareinboim", "abstract": "The Multi-Armed Bandit problem with Unobserved Confounders (MABUC) considers decision-making settings where unmeasured variables can influence both the agent\u2019s decisions and received rewards (Bareinboim et al., 2015). Recent findings showed that unobserved confounders (UCs) pose a unique challenge to algorithms based on standard randomization (i.e., experimental data); if UCs are naively averaged out, these algorithms behave sub-optimally, possibly incurring infinite regret. In this paper, we show how counterfactual-based decision-making circumvents these problems and leads to a coherent fusion of observational and experimental data. 
We then demonstrate this new strategy in an enhanced Thompson Sampling bandit player, and support our findings\u2019 efficacy with extensive simulations.", "bibtex": "@InProceedings{pmlr-v70-forney17a,\n title = \t {Counterfactual Data-Fusion for Online Reinforcement Learners},\n author = {Andrew Forney and Judea Pearl and Elias Bareinboim},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1156--1164},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/forney17a/forney17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/forney17a.html},\n abstract = \t {The Multi-Armed Bandit problem with Unobserved Confounders (MABUC) considers decision-making settings where unmeasured variables can influence both the agent\u2019s decisions and received rewards (Bareinboim et al., 2015). Recent findings showed that unobserved confounders (UCs) pose a unique challenge to algorithms based on standard randomization (i.e., experimental data); if UCs are naively averaged out, these algorithms behave sub-optimally, possibly incurring infinite regret. In this paper, we show how counterfactual-based decision-making circumvents these problems and leads to a coherent fusion of observational and experimental data. We then demonstrate this new strategy in an enhanced Thompson Sampling bandit player, and support our findings\u2019 efficacy with extensive simulations.}\n}", "pdf": "http://proceedings.mlr.press/v70/forney17a/forney17a.pdf", "supp": "", "pdf_size": 409773, "gs_citation": 89, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5098259566706920316&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "University of California, Los Angeles, California, USA; University of California, Los Angeles, California, USA; Purdue University, West Lafayette, Indiana, USA", "aff_domain": "cs.ucla.edu; ; ", "email": "cs.ucla.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/forney17a.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "University of California, Los Angeles;Purdue University", "aff_unique_dep": ";", "aff_unique_url": "https://www.ucla.edu;https://www.purdue.edu", "aff_unique_abbr": "UCLA;Purdue", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "Los Angeles;West Lafayette", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Coupling Distributed and Symbolic Execution for Natural Language Queries", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/572", "id": "572", "author_site": "Lili Mou, Zhengdong Lu, Hang Li, Zhi Jin", "author": "Lili Mou; Zhengdong Lu; Hang Li; Zhi Jin", "abstract": "Building neural networks to query a knowledge base (a table) with natural language is an emerging research topic in deep learning. An executor for table querying typically requires multiple steps of execution because queries may have complicated structures. In previous studies, researchers have developed either fully distributed executors or symbolic executors for table querying. A distributed executor can be trained in an end-to-end fashion, but is weak in terms of execution efficiency and explicit interpretability. A symbolic executor is efficient in execution, but is very difficult to train especially at initial stages. 
In this paper, we propose to couple distributed and symbolic execution for natural language queries, where the symbolic executor is pretrained with the distributed executor\u2019s intermediate execution results in a step-by-step fashion. Experiments show that our approach significantly outperforms both distributed and symbolic executors, exhibiting high accuracy, high learning efficiency, high execution efficiency, and high interpretability.", "bibtex": "@InProceedings{pmlr-v70-mou17a,\n title = \t {Coupling Distributed and Symbolic Execution for Natural Language Queries},\n author = {Lili Mou and Zhengdong Lu and Hang Li and Zhi Jin},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2518--2526},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/mou17a/mou17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/mou17a.html},\n abstract = \t {Building neural networks to query a knowledge base (a table) with natural language is an emerging research topic in deep learning. An executor for table querying typically requires multiple steps of execution because queries may have complicated structures. In previous studies, researchers have developed either fully distributed executors or symbolic executors for table querying. A distributed executor can be trained in an end-to-end fashion, but is weak in terms of execution efficiency and explicit interpretability. A symbolic executor is efficient in execution, but is very difficult to train especially at initial stages. In this paper, we propose to couple distributed and symbolic execution for natural language queries, where the symbolic executor is pretrained with the distributed executor\u2019s intermediate execution results in a step-by-step fashion. Experiments show that our approach significantly outperforms both distributed and symbolic executors, exhibiting high accuracy, high learning efficiency, high execution efficiency, and high interpretability.}\n}", "pdf": "http://proceedings.mlr.press/v70/mou17a/mou17a.pdf", "supp": "", "pdf_size": 1155348, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5625932791718839993&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/mou17a.html" }, { "title": "Curiosity-driven Exploration by Self-supervised Prediction", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/478", "id": "478", "author_site": "Deepak Pathak, Pulkit Agrawal, Alexei Efros, Trevor Darrell", "author": "Deepak Pathak; Pulkit Agrawal; Alexei A. Efros; Trevor Darrell", "abstract": "In many real-world scenarios, rewards extrinsic to the agent are extremely sparse, or absent altogether. In such cases, curiosity can serve as an intrinsic reward signal to enable the agent to explore its environment and learn skills that might be useful later in its life. We formulate curiosity as the error in an agent\u2019s ability to predict the consequence of its own actions in a visual feature space learned by a self-supervised inverse dynamics model. 
Our formulation scales to high-dimensional continuous state spaces like images, bypasses the difficulties of directly predicting pixels, and, critically, ignores the aspects of the environment that cannot affect the agent. The proposed approach is evaluated in two environments: VizDoom and Super Mario Bros. Three broad settings are investigated: 1) sparse extrinsic reward, where curiosity allows for far fewer interactions with the environment to reach the goal; 2) exploration with no extrinsic reward, where curiosity pushes the agent to explore more efficiently; and 3) generalization to unseen scenarios (e.g. new levels of the same game) where the knowledge gained from earlier experience helps the agent explore new places much faster than starting from scratch.", "bibtex": "@InProceedings{pmlr-v70-pathak17a,\n title = \t {Curiosity-driven Exploration by Self-supervised Prediction},\n author = {Deepak Pathak and Pulkit Agrawal and Alexei A. Efros and Trevor Darrell},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2778--2787},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/pathak17a/pathak17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/pathak17a.html},\n abstract = \t {In many real-world scenarios, rewards extrinsic to the agent are extremely sparse, or absent altogether. In such cases, curiosity can serve as an intrinsic reward signal to enable the agent to explore its environment and learn skills that might be useful later in its life. We formulate curiosity as the error in an agent\u2019s ability to predict the consequence of its own actions in a visual feature space learned by a self-supervised inverse dynamics model. Our formulation scales to high-dimensional continuous state spaces like images, bypasses the difficulties of directly predicting pixels, and, critically, ignores the aspects of the environment that cannot affect the agent. The proposed approach is evaluated in two environments: VizDoom and Super Mario Bros. Three broad settings are investigated: 1) sparse extrinsic reward, where curiosity allows for far fewer interactions with the environment to reach the goal; 2) exploration with no extrinsic reward, where curiosity pushes the agent to explore more efficiently; and 3) generalization to unseen scenarios (e.g. 
new levels of the same game) where the knowledge gained from earlier experience helps the agent explore new places much faster than starting from scratch.}\n}", "pdf": "http://proceedings.mlr.press/v70/pathak17a/pathak17a.pdf", "supp": "", "pdf_size": 4057485, "gs_citation": 3233, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9379743003299559904&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 17, "aff": "University of California, Berkeley; University of California, Berkeley; University of California, Berkeley; University of California, Berkeley", "aff_domain": "berkeley.edu; ; ; ", "email": "berkeley.edu; ; ; ", "github": "", "project": "http://pathak22.github.io/noreward-rl/", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/pathak17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "DARLA: Improving Zero-Shot Transfer in Reinforcement Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/711", "id": "711", "author_site": "Irina Higgins, Arka Pal, Andrei A Rusu, Loic Matthey, Christopher Burgess, Alexander Pritzel, Matthew Botvinick, Charles Blundell, Alexander Lerchner", "author": "Irina Higgins; Arka Pal; Andrei Rusu; Loic Matthey; Christopher Burgess; Alexander Pritzel; Matthew Botvinick; Charles Blundell; Alexander Lerchner", "abstract": "Domain adaptation is an important open problem in deep reinforcement learning (RL). In many scenarios of interest data is hard to obtain, so agents may learn a source policy in a setting where data is readily available, with the hope that it generalises well to the target domain. We propose a new multi-stage RL agent, DARLA (DisentAngled Representation Learning Agent), which learns to see before learning to act. DARLA\u2019s vision is based on learning a disentangled representation of the observed environment. Once DARLA can see, it is able to acquire source policies that are robust to many domain shifts \u2013 even with no access to the target domain. DARLA significantly outperforms conventional baselines in zero-shot domain adaptation scenarios, an effect that holds across a variety of RL environments (Jaco arm, DeepMind Lab) and base RL algorithms (DQN, A3C and EC).", "bibtex": "@InProceedings{pmlr-v70-higgins17a,\n title = \t {{DARLA}: Improving Zero-Shot Transfer in Reinforcement Learning},\n author = {Irina Higgins and Arka Pal and Andrei Rusu and Loic Matthey and Christopher Burgess and Alexander Pritzel and Matthew Botvinick and Charles Blundell and Alexander Lerchner},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1480--1490},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/higgins17a/higgins17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/higgins17a.html},\n abstract = \t {Domain adaptation is an important open problem in deep reinforcement learning (RL). 
In many scenarios of interest data is hard to obtain, so agents may learn a source policy in a setting where data is readily available, with the hope that it generalises well to the target domain. We propose a new multi-stage RL agent, DARLA (DisentAngled Representation Learning Agent), which learns to see before learning to act. DARLA\u2019s vision is based on learning a disentangled representation of the observed environment. Once DARLA can see, it is able to acquire source policies that are robust to many domain shifts \u2013 even with no access to the target domain. DARLA significantly outperforms conventional baselines in zero-shot domain adaptation scenarios, an effect that holds across a variety of RL environments (Jaco arm, DeepMind Lab) and base RL algorithms (DQN, A3C and EC).}\n}", "pdf": "http://proceedings.mlr.press/v70/higgins17a/higgins17a.pdf", "supp": "", "pdf_size": 4778804, "gs_citation": 560, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10520192097307501193&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind", "aff_domain": "google.com;google.com; ; ; ; ; ; ; ", "email": "google.com;google.com; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 9, "oa": "https://proceedings.mlr.press/v70/higgins17a.html", "aff_unique_index": "0;0;0;0;0;0;0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Dance Dance Convolution", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/653", "id": "653", "author_site": "Chris Donahue, Zachary Lipton, Julian McAuley", "author": "Chris Donahue; Zachary C. Lipton; Julian McAuley", "abstract": "Dance Dance Revolution (DDR) is a popular rhythm-based video game. Players perform steps on a dance platform in synchronization with music as directed by on-screen step charts. While many step charts are available in standardized packs, players may grow tired of existing charts, or wish to dance to a song for which no chart exists. We introduce the task of learning to choreograph. Given a raw audio track, the goal is to produce a new step chart. This task decomposes naturally into two subtasks: deciding when to place steps and deciding which steps to select. For the step placement task, we combine recurrent and convolutional neural networks to ingest spectrograms of low-level audio features to predict steps, conditioned on chart difficulty. For step selection, we present a conditional LSTM generative model that substantially outperforms n-gram and fixed-window approaches.", "bibtex": "@InProceedings{pmlr-v70-donahue17a,\n title = \t {Dance Dance Convolution},\n author = {Chris Donahue and Zachary C. Lipton and Julian McAuley},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1039--1048},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/donahue17a/donahue17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/donahue17a.html},\n abstract = \t {Dance Dance Revolution (DDR) is a popular rhythm-based video game. 
Players perform steps on a dance platform in synchronization with music as directed by on-screen step charts. While many step charts are available in standardized packs, players may grow tired of existing charts, or wish to dance to a song for which no chart exists. We introduce the task of learning to choreograph. Given a raw audio track, the goal is to produce a new step chart. This task decomposes naturally into two subtasks: deciding when to place steps and deciding which steps to select. For the step placement task, we combine recurrent and convolutional neural networks to ingest spectrograms of low-level audio features to predict steps, conditioned on chart difficulty. For step selection, we present a conditional LSTM generative model that substantially outperforms n-gram and fixed-window approaches.}\n}", "pdf": "http://proceedings.mlr.press/v70/donahue17a/donahue17a.pdf", "supp": "", "pdf_size": 2135672, "gs_citation": 68, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5392626708822418693&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "UCSD Department of Music; UCSD Department of Computer Science; UCSD Department of Computer Science", "aff_domain": "ucsd.edu; ; ", "email": "ucsd.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/donahue17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, San Diego", "aff_unique_dep": "Department of Music", "aff_unique_url": "https://ucsd.edu", "aff_unique_abbr": "UCSD", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "La Jolla", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Data-Efficient Policy Evaluation Through Behavior Policy Search", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/855", "id": "855", "author_site": "Josiah Hanna, Philip S. Thomas, Peter Stone, Scott Niekum", "author": "Josiah P. Hanna; Philip S. Thomas; Peter Stone; Scott Niekum", "abstract": "We consider the task of evaluating a policy for a Markov decision process (MDP). The standard unbiased technique for evaluating a policy is to deploy the policy and observe its performance. We show that the data collected from deploying a different policy, commonly called the behavior policy, can be used to produce unbiased estimates with lower mean squared error than this standard technique. We derive an analytic expression for the optimal behavior policy \u2014 the behavior policy that minimizes the mean squared error of the resulting estimates. Because this expression depends on terms that are unknown in practice, we propose a novel policy evaluation sub-problem, behavior policy search: searching for a behavior policy that reduces mean squared error. We present a behavior policy search algorithm and empirically demonstrate its effectiveness in lowering the mean squared error of policy performance estimates.", "bibtex": "@InProceedings{pmlr-v70-hanna17a,\n title = \t {Data-Efficient Policy Evaluation Through Behavior Policy Search},\n author = {Josiah P. Hanna and Philip S. 
Thomas and Peter Stone and Scott Niekum},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1394--1403},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/hanna17a/hanna17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/hanna17a.html},\n abstract = \t {We consider the task of evaluating a policy for a Markov decision process (MDP). The standard unbiased technique for evaluating a policy is to deploy the policy and observe its performance. We show that the data collected from deploying a different policy, commonly called the behavior policy, can be used to produce unbiased estimates with lower mean squared error than this standard technique. We derive an analytic expression for the optimal behavior policy \u2014 the behavior policy that minimizes the mean squared error of the resulting estimates. Because this expression depends on terms that are unknown in practice, we propose a novel policy evaluation sub-problem, behavior policy search: searching for a behavior policy that reduces mean squared error. We present a behavior policy search algorithm and empirically demonstrate its effectiveness in lowering the mean squared error of policy performance estimates.}\n}", "pdf": "http://proceedings.mlr.press/v70/hanna17a/hanna17a.pdf", "supp": "", "pdf_size": 1169673, "gs_citation": 55, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12530205940584937998&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "The University of Texas at Austin; The University of Massachusetts, Amherst + Carnegie Mellon University; The University of Texas at Austin; The University of Texas at Austin", "aff_domain": "cs.utexas.edu; ; ; ", "email": "cs.utexas.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/hanna17a.html", "aff_unique_index": "0;1+2;0;0", "aff_unique_norm": "University of Texas at Austin;University of Massachusetts Amherst;Carnegie Mellon University", "aff_unique_dep": ";;", "aff_unique_url": "https://www.utexas.edu;https://www.umass.edu;https://www.cmu.edu", "aff_unique_abbr": "UT Austin;UMass Amherst;CMU", "aff_campus_unique_index": "0;1;0;0", "aff_campus_unique": "Austin;Amherst;", "aff_country_unique_index": "0;0+0;0;0", "aff_country_unique": "United States" }, { "title": "Deciding How to Decide: Dynamic Routing in Artificial Neural Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/642", "id": "642", "author_site": "Mason McGill, Pietro Perona", "author": "Mason McGill; Pietro Perona", "abstract": "We propose and systematically evaluate three strategies for training dynamically-routed artificial neural networks: graphs of learned transformations through which different input signals may take different paths. Though some approaches have advantages over others, the resulting networks are often qualitatively similar. We find that, in dynamically-routed networks trained to classify images, layers and branches become specialized to process distinct categories of images. 
Additionally, given a fixed computational budget, dynamically-routed networks tend to perform better than comparable statically-routed networks.", "bibtex": "@InProceedings{pmlr-v70-mcgill17a,\n title = \t {Deciding How to Decide: Dynamic Routing in Artificial Neural Networks},\n author = {Mason McGill and Pietro Perona},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2363--2372},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/mcgill17a/mcgill17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/mcgill17a.html},\n abstract = \t {We propose and systematically evaluate three strategies for training dynamically-routed artificial neural networks: graphs of learned transformations through which different input signals may take different paths. Though some approaches have advantages over others, the resulting networks are often qualitatively similar. We find that, in dynamically-routed networks trained to classify images, layers and branches become specialized to process distinct categories of images. Additionally, given a fixed computational budget, dynamically-routed networks tend to perform better than comparable statically-routed networks.}\n}", "pdf": "http://proceedings.mlr.press/v70/mcgill17a/mcgill17a.pdf", "supp": "", "pdf_size": 1966983, "gs_citation": 138, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12494216683354276081&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "California Institute of Technology; California Institute of Technology", "aff_domain": "caltech.edu; ", "email": "caltech.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/mcgill17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "California Institute of Technology", "aff_unique_dep": "", "aff_unique_url": "https://www.caltech.edu", "aff_unique_abbr": "Caltech", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Pasadena", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Decoupled Neural Interfaces using Synthetic Gradients", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/665", "id": "665", "author_site": "Max Jaderberg, Wojciech Czarnecki, Simon Osindero, Oriol Vinyals, Alex Graves, David Silver, Koray Kavukcuoglu", "author": "Max Jaderberg; Wojciech Marian Czarnecki; Simon Osindero; Oriol Vinyals; Alex Graves; David Silver; Koray Kavukcuoglu", "abstract": "Training directed neural networks typically requires forward-propagating data through a computation graph, followed by backpropagating error signal, to produce weight updates. All layers, or more generally, modules, of the network are therefore locked, in the sense that they must wait for the remainder of the network to execute forwards and propagate error backwards before they can be updated. In this work we break this constraint by decoupling modules by introducing a model of the future computation of the network graph. These models predict what the result of the modelled subgraph will produce using only local information. 
In particular we focus on modelling error gradients: by using the modelled", "bibtex": "@InProceedings{pmlr-v70-jaderberg17a,\n title = \t {Decoupled Neural Interfaces using Synthetic Gradients},\n author = {Max Jaderberg and Wojciech Marian Czarnecki and Simon Osindero and Oriol Vinyals and Alex Graves and David Silver and Koray Kavukcuoglu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1627--1635},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/jaderberg17a/jaderberg17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/jaderberg17a.html},\n abstract = \t {Training directed neural networks typically requires forward-propagating data through a computation graph, followed by backpropagating error signal, to produce weight updates. All layers, or more generally, modules, of the network are therefore locked, in the sense that they must wait for the remainder of the network to execute forwards and propagate error backwards before they can be updated. In this work we break this constraint by decoupling modules by introducing a model of the future computation of the network graph. These models predict what the result of the modelled subgraph will produce using only local information. In particular we focus on modelling error gradients: by using the modelled", "pdf": "http://proceedings.mlr.press/v70/jaderberg17a/jaderberg17a.pdf", "supp": "", "pdf_size": 4070480, "gs_citation": 457, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8826125980550854976&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK", "aff_domain": "google.com; ; ; ; ; ; ", "email": "google.com; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v70/jaderberg17a.html", "aff_unique_index": "0;0;0;0;0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "0;0;0;0;0;0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Deep Bayesian Active Learning with Image Data", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/794", "id": "794", "author_site": "Yarin Gal, Riashat Islam, Zoubin Ghahramani", "author": "Yarin Gal; Riashat Islam; Zoubin Ghahramani", "abstract": "Even though active learning forms an important pillar of machine learning, deep learning tools are not prevalent within it. Deep learning poses several difficulties when used in an active learning setting. First, active learning (AL) methods generally rely on being able to learn and update models from small amounts of data. Recent advances in deep learning, on the other hand, are notorious for their dependence on large amounts of data. Second, many AL acquisition functions rely on model uncertainty, yet deep learning methods rarely represent such model uncertainty. In this paper we combine recent advances in Bayesian deep learning into the active learning framework in a practical way. 
We develop an active learning framework for high dimensional data, a task which has been extremely challenging so far, with very sparse existing literature. Taking advantage of specialised models such as Bayesian convolutional neural networks, we demonstrate our active learning techniques with image data, obtaining a significant improvement on existing active learning approaches. We demonstrate this on both the MNIST dataset, as well as for skin cancer diagnosis from lesion images (ISIC2016 task).", "bibtex": "@InProceedings{pmlr-v70-gal17a,\n title = \t {Deep {B}ayesian Active Learning with Image Data},\n author = {Yarin Gal and Riashat Islam and Zoubin Ghahramani},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1183--1192},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/gal17a/gal17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/gal17a.html},\n abstract = \t {Even though active learning forms an important pillar of machine learning, deep learning tools are not prevalent within it. Deep learning poses several difficulties when used in an active learning setting. First, active learning (AL) methods generally rely on being able to learn and update models from small amounts of data. Recent advances in deep learning, on the other hand, are notorious for their dependence on large amounts of data. Second, many AL acquisition functions rely on model uncertainty, yet deep learning methods rarely represent such model uncertainty. In this paper we combine recent advances in Bayesian deep learning into the active learning framework in a practical way. We develop an active learning framework for high dimensional data, a task which has been extremely challenging so far, with very sparse existing literature. Taking advantage of specialised models such as Bayesian convolutional neural networks, we demonstrate our active learning techniques with image data, obtaining a significant improvement on existing active learning approaches. 
We demonstrate this on both the MNIST dataset, as well as for skin cancer diagnosis from lesion images (ISIC2016 task).}\n}", "pdf": "http://proceedings.mlr.press/v70/gal17a/gal17a.pdf", "supp": "", "pdf_size": 704667, "gs_citation": 2203, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14700234482257382078&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "University of Cambridge, UK+The Alan Turing Institute, UK; University of Cambridge, UK; Uber AI Labs", "aff_domain": "cam.ac.uk; ; ", "email": "cam.ac.uk; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/gal17a.html", "aff_unique_index": "0+1;0;2", "aff_unique_norm": "University of Cambridge;Alan Turing Institute;Uber", "aff_unique_dep": ";;Uber AI Labs", "aff_unique_url": "https://www.cam.ac.uk;https://www.turing.ac.uk;https://www.uber.com", "aff_unique_abbr": "Cambridge;ATI;Uber AI Labs", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cambridge;", "aff_country_unique_index": "0+0;0;1", "aff_country_unique": "United Kingdom;United States" }, { "title": "Deep Decentralized Multi-task Multi-Agent Reinforcement Learning under Partial Observability", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/845", "id": "845", "author_site": "Shayegan Omidshafiei, Jason Pazis, Chris Amato, Jonathan How, John L Vian", "author": "Shayegan Omidshafiei; Jason Pazis; Christopher Amato; Jonathan P. How; John Vian", "abstract": "Many real-world tasks involve multiple agents with partial observability and limited communication. Learning is challenging in these settings due to local viewpoints of agents, which perceive the world as non-stationary due to concurrently-exploring teammates. Approaches that learn specialized policies for individual tasks face problems when applied to the real world: not only do agents have to learn and store distinct policies for each task, but in practice identities of tasks are often non-observable, making these approaches inapplicable. This paper formalizes and addresses the problem of multi-task multi-agent reinforcement learning under partial observability. We introduce a decentralized single-task learning approach that is robust to concurrent interactions of teammates, and present an approach for distilling single-task policies into a unified policy that performs well across multiple related tasks, without explicit provision of task identity.", "bibtex": "@InProceedings{pmlr-v70-omidshafiei17a,\n title = \t {Deep Decentralized Multi-task Multi-Agent Reinforcement Learning under Partial Observability},\n author = {Shayegan Omidshafiei and Jason Pazis and Christopher Amato and Jonathan P. How and John Vian},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2681--2690},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/omidshafiei17a/omidshafiei17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/omidshafiei17a.html},\n abstract = \t {Many real-world tasks involve multiple agents with partial observability and limited communication. Learning is challenging in these settings due to local viewpoints of agents, which perceive the world as non-stationary due to concurrently-exploring teammates. 
Approaches that learn specialized policies for individual tasks face problems when applied to the real world: not only do agents have to learn and store distinct policies for each task, but in practice identities of tasks are often non-observable, making these approaches inapplicable. This paper formalizes and addresses the problem of multi-task multi-agent reinforcement learning under partial observability. We introduce a decentralized single-task learning approach that is robust to concurrent interactions of teammates, and present an approach for distilling single-task policies into a unified policy that performs well across multiple related tasks, without explicit provision of task identity.}\n}", "pdf": "http://proceedings.mlr.press/v70/omidshafiei17a/omidshafiei17a.pdf", "supp": "", "pdf_size": 1102015, "gs_citation": 706, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14362474747446780618&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Laboratory for Information and Decision Systems (LIDS), MIT, Cambridge, MA, USA; Laboratory for Information and Decision Systems (LIDS), MIT, Cambridge, MA, USA; College of Computer and Information Science (CCIS), Northeastern University, Boston, MA, USA; Laboratory for Information and Decision Systems (LIDS), MIT, Cambridge, MA, USA; Boeing Research & Technology, Seattle, WA, USA", "aff_domain": "mit.edu; ; ; ; ", "email": "mit.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/omidshafiei17a.html", "aff_unique_index": "0;0;1;0;2", "aff_unique_norm": "Massachusetts Institute of Technology;Northeastern University;Boeing Research & Technology", "aff_unique_dep": "Laboratory for Information and Decision Systems (LIDS);College of Computer and Information Science;", "aff_unique_url": "https://web.mit.edu;https://www.northeastern.edu;https://www.boeing.com/research-technology/", "aff_unique_abbr": "MIT;NU;Boeing R&T", "aff_campus_unique_index": "0;0;1;0;2", "aff_campus_unique": "Cambridge;Boston;Seattle", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Deep Generative Models for Relational Data with Side Information", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/712", "id": "712", "author_site": "Changwei Hu, Piyush Rai, Lawrence Carin", "author": "Changwei Hu; Piyush Rai; Lawrence Carin", "abstract": "We present a probabilistic framework for overlapping community discovery and link prediction for relational data, given as a graph. The proposed framework has: (1) a deep architecture which enables us to infer multiple layers of latent features/communities for each node, providing superior link prediction performance on more complex networks and better interpretability of the latent features; and (2) a regression model which allows directly conditioning the node latent features on the side information available in form of node attributes. Our framework handles both (1) and (2) via a clean, unified model, which enjoys full local conjugacy via data augmentation, and facilitates efficient inference via closed form Gibbs sampling. Moreover, inference cost scales in the number of edges which is attractive for massive but sparse networks. Our framework is also easily extendable to model weighted networks with count-valued edges. 
We compare with various state-of-the-art methods and report results, both quantitative and qualitative, on several benchmark data sets.", "bibtex": "@InProceedings{pmlr-v70-hu17d,\n title = \t {Deep Generative Models for Relational Data with Side Information},\n author = {Changwei Hu and Piyush Rai and Lawrence Carin},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1578--1586},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/hu17d/hu17d.pdf},\n url = \t {https://proceedings.mlr.press/v70/hu17d.html},\n abstract = \t {We present a probabilistic framework for overlapping community discovery and link prediction for relational data, given as a graph. The proposed framework has: (1) a deep architecture which enables us to infer multiple layers of latent features/communities for each node, providing superior link prediction performance on more complex networks and better interpretability of the latent features; and (2) a regression model which allows directly conditioning the node latent features on the side information available in form of node attributes. Our framework handles both (1) and (2) via a clean, unified model, which enjoys full local conjugacy via data augmentation, and facilitates efficient inference via closed form Gibbs sampling. Moreover, inference cost scales in the number of edges which is attractive for massive but sparse networks. Our framework is also easily extendable to model weighted networks with count-valued edges. We compare with various state-of-the-art methods and report results, both quantitative and qualitative, on several benchmark data sets.}\n}", "pdf": "http://proceedings.mlr.press/v70/hu17d/hu17d.pdf", "supp": "", "pdf_size": 429938, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8076954898102172882&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Yahoo! Research, New York, NY, USA+Duke University, Durham, NC, USA; CSE Department, IIT Kanpur, Kanpur, UP, India; Duke University, Durham, NC, USA", "aff_domain": "yahoo-inc.com;cse.iitk.ac.in;duke.edu", "email": "yahoo-inc.com;cse.iitk.ac.in;duke.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/hu17d.html", "aff_unique_index": "0+1;2;1", "aff_unique_norm": "Yahoo! Research;Duke University;Indian Institute of Technology Kanpur", "aff_unique_dep": ";;Computer Science and Engineering", "aff_unique_url": "https://research.yahoo.com;https://www.duke.edu;https://www.iitk.ac.in", "aff_unique_abbr": "Yahoo! Res;Duke;IIT Kanpur", "aff_campus_unique_index": "0+1;2;1", "aff_campus_unique": "New York;Durham;Kanpur", "aff_country_unique_index": "0+0;1;0", "aff_country_unique": "United States;India" }, { "title": "Deep IV: A Flexible Approach for Counterfactual Prediction", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/883", "id": "883", "author_site": "Jason Hartford, Greg Lewis, Kevin Leyton-Brown, Matt Taddy", "author": "Jason Hartford; Greg Lewis; Kevin Leyton-Brown; Matt Taddy", "abstract": "Counterfactual prediction requires understanding causal relationships between so-called treatment and outcome variables. 
This paper provides a recipe for augmenting deep learning methods to accurately characterize such relationships in the presence of instrument variables (IVs) \u2013 sources of treatment randomization that are conditionally independent from the outcomes. Our IV specification resolves into two prediction tasks that can be solved with deep neural nets: a first-stage network for treatment prediction and a second-stage network whose loss function involves integration over the conditional treatment distribution. This Deep IV framework allows us to take advantage of off-the-shelf supervised learning techniques to estimate causal effects by adapting the loss function. Experiments show that it outperforms existing machine learning approaches.", "bibtex": "@InProceedings{pmlr-v70-hartford17a,\n title = \t {Deep {IV}: A Flexible Approach for Counterfactual Prediction},\n author = {Jason Hartford and Greg Lewis and Kevin Leyton-Brown and Matt Taddy},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1414--1423},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/hartford17a/hartford17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/hartford17a.html},\n abstract = \t {Counterfactual prediction requires understanding causal relationships between so-called treatment and outcome variables. This paper provides a recipe for augmenting deep learning methods to accurately characterize such relationships in the presence of instrument variables (IVs) \u2013 sources of treatment randomization that are conditionally independent from the outcomes. Our IV specification resolves into two prediction tasks that can be solved with deep neural nets: a first-stage network for treatment prediction and a second-stage network whose loss function involves integration over the conditional treatment distribution. This Deep IV framework allows us to take advantage of off-the-shelf supervised learning techniques to estimate causal effects by adapting the loss function. 
Experiments show that it outperforms existing machine learning approaches.}\n}", "pdf": "http://proceedings.mlr.press/v70/hartford17a/hartford17a.pdf", "supp": "", "pdf_size": 216900, "gs_citation": 413, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11287742981019368143&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "University of British Columbia, Canada; Microsoft Research, New England, USA; University of British Columbia, Canada; Microsoft Research, New England, USA", "aff_domain": "cs.ubc.ca; ; ;microsoft.com", "email": "cs.ubc.ca; ; ;microsoft.com", "github": "https://github.com/jhartford/DeepIV", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/hartford17a.html", "aff_unique_index": "0;1;0;1", "aff_unique_norm": "University of British Columbia;Microsoft", "aff_unique_dep": ";Microsoft Research", "aff_unique_url": "https://www.ubc.ca;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "UBC;MSR", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";New England", "aff_country_unique_index": "0;1;0;1", "aff_country_unique": "Canada;United States" }, { "title": "Deep Latent Dirichlet Allocation with Topic-Layer-Adaptive Stochastic Gradient Riemannian MCMC", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/703", "id": "703", "author_site": "Yulai Cong, Bo Chen, Hongwei Liu, Mingyuan Zhou", "author": "Yulai Cong; Bo Chen; Hongwei Liu; Mingyuan Zhou", "abstract": "It is challenging to develop stochastic gradient based scalable inference for deep discrete latent variable models (LVMs), due to the difficulties in not only computing the gradients, but also adapting the step sizes to different latent factors and hidden layers. For the Poisson gamma belief network (PGBN), a recently proposed deep discrete LVM, we derive an alternative representation that is referred to as deep latent Dirichlet allocation (DLDA). Exploiting data augmentation and marginalization techniques, we derive a block-diagonal Fisher information matrix and its inverse for the simplex-constrained global model parameters of DLDA. Exploiting that Fisher information matrix with stochastic gradient MCMC, we present topic-layer-adaptive stochastic gradient Riemannian (TLASGR) MCMC that jointly learns simplex-constrained global parameters across all layers and topics, with topic and layer specific learning rates. State-of-the-art results are demonstrated on big data sets.", "bibtex": "@InProceedings{pmlr-v70-cong17a,\n title = \t {Deep Latent {D}irichlet Allocation with Topic-Layer-Adaptive Stochastic Gradient {R}iemannian {MCMC}},\n author = {Yulai Cong and Bo Chen and Hongwei Liu and Mingyuan Zhou},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {864--873},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/cong17a/cong17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/cong17a.html},\n abstract = \t {It is challenging to develop stochastic gradient based scalable inference for deep discrete latent variable models (LVMs), due to the difficulties in not only computing the gradients, but also adapting the step sizes to different latent factors and hidden layers. 
For the Poisson gamma belief network (PGBN), a recently proposed deep discrete LVM, we derive an alternative representation that is referred to as deep latent Dirichlet allocation (DLDA). Exploiting data augmentation and marginalization techniques, we derive a block-diagonal Fisher information matrix and its inverse for the simplex-constrained global model parameters of DLDA. Exploiting that Fisher information matrix with stochastic gradient MCMC, we present topic-layer-adaptive stochastic gradient Riemannian (TLASGR) MCMC that jointly learns simplex-constrained global parameters across all layers and topics, with topic and layer specific learning rates. State-of-the-art results are demonstrated on big data sets.}\n}", "pdf": "http://proceedings.mlr.press/v70/cong17a/cong17a.pdf", "supp": "", "pdf_size": 1650747, "gs_citation": 71, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3344216215391384151&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "National Laboratory of Radar Signal Processing, Collaborative Innovation Center of Information Sensing and Understanding, Xidian University, Xi'an, China; National Laboratory of Radar Signal Processing, Collaborative Innovation Center of Information Sensing and Understanding, Xidian University, Xi'an, China; National Laboratory of Radar Signal Processing, Collaborative Innovation Center of Information Sensing and Understanding, Xidian University, Xi'an, China; McCombs School of Business, The University of Texas at Austin, Austin, TX 78712, USA", "aff_domain": "mail.xidian.edu.cn;mccombs.utexas.edu; ; ", "email": "mail.xidian.edu.cn;mccombs.utexas.edu; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/cong17a.html", "aff_unique_index": "0;0;0;1", "aff_unique_norm": "Xidian University;University of Texas at Austin", "aff_unique_dep": "National Laboratory of Radar Signal Processing;McCombs School of Business", "aff_unique_url": "http://www.xidian.edu.cn/;https://www.mccombs.utexas.edu", "aff_unique_abbr": "Xidian;UT Austin", "aff_campus_unique_index": "0;0;0;1", "aff_campus_unique": "Xi'an;Austin", "aff_country_unique_index": "0;0;0;1", "aff_country_unique": "China;United States" }, { "title": "Deep Spectral Clustering Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/494", "id": "494", "author_site": "Marc Law, Raquel Urtasun, Richard Zemel", "author": "Marc T. Law; Raquel Urtasun; Richard S. Zemel", "abstract": "Clustering is the task of grouping a set of examples so that similar examples are grouped into the same cluster while dissimilar examples are in different clusters. The quality of a clustering depends on two problem-dependent factors which are i) the chosen similarity metric and ii) the data representation. Supervised clustering approaches, which exploit labeled partitioned datasets have thus been proposed, for instance to learn a metric optimized to perform clustering. However, most of these approaches assume that the representation of the data is fixed and then learn an appropriate linear transformation. Some deep supervised clustering learning approaches have also been proposed. However, they rely on iterative methods to compute gradients resulting in high algorithmic complexity. In this paper, we propose a deep supervised clustering metric learning method that formulates a novel loss function. 
We derive a closed-form expression for the gradient that is efficient to compute: the complexity to compute the gradient is linear in the size of the training mini-batch and quadratic in the representation dimensionality. We further reveal how our approach can be seen as learning spectral clustering. Experiments on standard real-world datasets confirm state-of-the-art Recall@K performance.", "bibtex": "@InProceedings{pmlr-v70-law17a,\n title = \t {Deep Spectral Clustering Learning},\n author = {Marc T. Law and Raquel Urtasun and Richard S. Zemel},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1985--1994},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/law17a/law17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/law17a.html},\n abstract = \t {Clustering is the task of grouping a set of examples so that similar examples are grouped into the same cluster while dissimilar examples are in different clusters. The quality of a clustering depends on two problem-dependent factors which are i) the chosen similarity metric and ii) the data representation. Supervised clustering approaches, which exploit labeled partitioned datasets have thus been proposed, for instance to learn a metric optimized to perform clustering. However, most of these approaches assume that the representation of the data is fixed and then learn an appropriate linear transformation. Some deep supervised clustering learning approaches have also been proposed. However, they rely on iterative methods to compute gradients resulting in high algorithmic complexity. In this paper, we propose a deep supervised clustering metric learning method that formulates a novel loss function. We derive a closed-form expression for the gradient that is efficient to compute: the complexity to compute the gradient is linear in the size of the training mini-batch and quadratic in the representation dimensionality. We further reveal how our approach can be seen as learning spectral clustering. 
Experiments on standard real-world datasets confirm state-of-the-art Recall@K performance.}\n}", "pdf": "http://proceedings.mlr.press/v70/law17a/law17a.pdf", "supp": "", "pdf_size": 409881, "gs_citation": 164, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13518147976059771104&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science, University of Toronto; Department of Computer Science, University of Toronto; Department of Computer Science, University of Toronto + CIFAR Senior Fellow", "aff_domain": "cs.toronto.edu; ; ", "email": "cs.toronto.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/law17a.html", "aff_unique_index": "0;0;0+1", "aff_unique_norm": "University of Toronto;CIFAR", "aff_unique_dep": "Department of Computer Science;Senior Fellow", "aff_unique_url": "https://www.utoronto.ca;https://www.cifar.ca", "aff_unique_abbr": "U of T;CIFAR", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Toronto;", "aff_country_unique_index": "0;0;0+0", "aff_country_unique": "Canada" }, { "title": "Deep Tensor Convolution on Multicores", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/465", "id": "465", "author_site": "David Budden, Alexander Matveev, Shibani Santurkar, Shraman Ray Chaudhuri, Nir Shavit", "author": "David Budden; Alexander Matveev; Shibani Santurkar; Shraman Ray Chaudhuri; Nir Shavit", "abstract": "Deep convolutional neural networks (ConvNets) of 3-dimensional kernels allow joint modeling of spatiotemporal features. These networks have improved performance of video and volumetric image analysis, but have been limited in size due to the low memory ceiling of GPU hardware. Existing CPU implementations overcome this constraint but are impractically slow. Here we extend and optimize the faster Winograd-class of convolutional algorithms to the $N$-dimensional case and specifically for CPU hardware. First, we remove the need to manually hand-craft algorithms by exploiting the relaxed constraints and cheap sparse access of CPU memory. Second, we maximize CPU utilization and multicore scalability by transforming data matrices to be cache-aware, integer multiples of AVX vector widths. Treating 2-dimensional ConvNets as a special (and the least beneficial) case of our approach, we demonstrate a 5 to 25-fold improvement in throughput compared to previous state-of-the-art.", "bibtex": "@InProceedings{pmlr-v70-budden17a,\n title = \t {Deep Tensor Convolution on Multicores},\n author = {David Budden and Alexander Matveev and Shibani Santurkar and Shraman Ray Chaudhuri and Nir Shavit},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {615--624},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/budden17a/budden17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/budden17a.html},\n abstract = \t {Deep convolutional neural networks (ConvNets) of 3-dimensional kernels allow joint modeling of spatiotemporal features. These networks have improved performance of video and volumetric image analysis, but have been limited in size due to the low memory ceiling of GPU hardware. Existing CPU implementations overcome this constraint but are impractically slow. 
Here we extend and optimize the faster Winograd-class of convolutional algorithms to the $N$-dimensional case and specifically for CPU hardware. First, we remove the need to manually hand-craft algorithms by exploiting the relaxed constraints and cheap sparse access of CPU memory. Second, we maximize CPU utilization and multicore scalability by transforming data matrices to be cache-aware, integer multiples of AVX vector widths. Treating 2-dimensional ConvNets as a special (and the least beneficial) case of our approach, we demonstrate a 5 to 25-fold improvement in throughput compared to previous state-of-the-art.}\n}", "pdf": "http://proceedings.mlr.press/v70/budden17a/budden17a.pdf", "supp": "", "pdf_size": 2774570, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7806077637019356943&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Massachusetts Institute of Technology; Massachusetts Institute of Technology; Massachusetts Institute of Technology; Massachusetts Institute of Technology; Massachusetts Institute of Technology", "aff_domain": "csail.mit.edu; ; ; ; ", "email": "csail.mit.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/budden17a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "", "aff_unique_url": "https://web.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Deep Transfer Learning with Joint Adaptation Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/470", "id": "470", "author_site": "Mingsheng Long, Han Zhu, Jianmin Wang, Michael Jordan", "author": "Mingsheng Long; Han Zhu; Jianmin Wang; Michael I. Jordan", "abstract": "Deep networks have been successfully applied to learn transferable features for adapting models from a source domain to a different target domain. In this paper, we present joint adaptation networks (JAN), which learn a transfer network by aligning the joint distributions of multiple domain-specific layers across domains based on a joint maximum mean discrepancy (JMMD) criterion. Adversarial training strategy is adopted to maximize JMMD such that the distributions of the source and target domains are made more distinguishable. Learning can be performed by stochastic gradient descent with the gradients computed by back-propagation in linear-time. Experiments testify that our model yields state of the art results on standard datasets.", "bibtex": "@InProceedings{pmlr-v70-long17a,\n title = \t {Deep Transfer Learning with Joint Adaptation Networks},\n author = {Mingsheng Long and Han Zhu and Jianmin Wang and Michael I. Jordan},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2208--2217},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/long17a/long17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/long17a.html},\n abstract = \t {Deep networks have been successfully applied to learn transferable features for adapting models from a source domain to a different target domain. 
In this paper, we present joint adaptation networks (JAN), which learn a transfer network by aligning the joint distributions of multiple domain-specific layers across domains based on a joint maximum mean discrepancy (JMMD) criterion. Adversarial training strategy is adopted to maximize JMMD such that the distributions of the source and target domains are made more distinguishable. Learning can be performed by stochastic gradient descent with the gradients computed by back-propagation in linear-time. Experiments testify that our model yields state of the art results on standard datasets.}\n}", "pdf": "http://proceedings.mlr.press/v70/long17a/long17a.pdf", "supp": "", "pdf_size": 523493, "gs_citation": 3217, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15055864969058081042&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/long17a.html" }, { "title": "Deep Value Networks Learn to Evaluate and Iteratively Refine Structured Outputs", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/534", "id": "534", "author_site": "Michael Gygli, Mohammad Norouzi, Anelia Angelova", "author": "Michael Gygli; Mohammad Norouzi; Anelia Angelova", "abstract": "We approach structured output prediction by optimizing a deep value network (DVN) to precisely estimate the task loss on different output configurations for a given input. Once the model is trained, we perform inference by gradient descent on the continuous relaxations of the output variables to find outputs with promising scores from the value network. When applied to image segmentation, the value network takes an image and a segmentation mask as inputs and predicts a scalar estimating the intersection over union between the input and ground truth masks. For multi-label classification, the DVN\u2019s objective is to correctly predict the F1 score for any potential label configuration. The DVN framework achieves the state-of-the-art results on multi-label prediction and image segmentation benchmarks.", "bibtex": "@InProceedings{pmlr-v70-gygli17a,\n title = \t {Deep Value Networks Learn to Evaluate and Iteratively Refine Structured Outputs},\n author = {Michael Gygli and Mohammad Norouzi and Anelia Angelova},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1341--1351},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/gygli17a/gygli17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/gygli17a.html},\n abstract = \t {We approach structured output prediction by optimizing a deep value network (DVN) to precisely estimate the task loss on different output configurations for a given input. Once the model is trained, we perform inference by gradient descent on the continuous relaxations of the output variables to find outputs with promising scores from the value network. When applied to image segmentation, the value network takes an image and a segmentation mask as inputs and predicts a scalar estimating the intersection over union between the input and ground truth masks. For multi-label classification, the DVN\u2019s objective is to correctly predict the F1 score for any potential label configuration. 
The DVN framework achieves the state-of-the-art results on multi-label prediction and image segmentation benchmarks.}\n}", "pdf": "http://proceedings.mlr.press/v70/gygli17a/gygli17a.pdf", "supp": "", "pdf_size": 820257, "gs_citation": 76, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2622631964942655791&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "ETH Z\u00fcrich & gifs.com; Google Brain, Mountain View, USA; Google Brain, Mountain View, USA", "aff_domain": "vision.ee.ethz.ch;google.com; ", "email": "vision.ee.ethz.ch;google.com; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/gygli17a.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "ETH Zurich;Google", "aff_unique_dep": ";Google Brain", "aff_unique_url": "https://www.ethz.ch;https://brain.google.com", "aff_unique_abbr": "ETH;Google Brain", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Mountain View", "aff_country_unique_index": "0;1;1", "aff_country_unique": "Switzerland;United States" }, { "title": "Deep Voice: Real-time Neural Text-to-Speech", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/646", "id": "646", "author_site": "Andrew Gibiansky, Mike Chrzanowski, Mohammad Shoeybi, Shubho Sengupta, Gregory Diamos, Sercan Arik, Jonathan Raiman, John Miller, Xian Li, Yongguo Kang, Adam Coates, Andrew Ng", "author": "Sercan \u00d6. Ar\u0131k; Mike Chrzanowski; Adam Coates; Gregory Diamos; Andrew Gibiansky; Yongguo Kang; Xian Li; John Miller; Andrew Ng; Jonathan Raiman; Shubho Sengupta; Mohammad Shoeybi", "abstract": "We present Deep Voice, a production-quality text-to-speech system constructed entirely from deep neural networks. Deep Voice lays the groundwork for truly end-to-end neural speech synthesis. The system comprises five major building blocks: a segmentation model for locating phoneme boundaries, a grapheme-to-phoneme conversion model, a phoneme duration prediction model, a fundamental frequency prediction model, and an audio synthesis model. For the segmentation model, we propose a novel way of performing phoneme boundary detection with deep neural networks using connectionist temporal classification (CTC) loss. For the audio synthesis model, we implement a variant of WaveNet that requires fewer parameters and trains faster than the original. By using a neural network for each component, our system is simpler and more flexible than traditional text-to-speech systems, where each component requires laborious feature engineering and extensive domain expertise. Finally, we show that inference with our system can be performed faster than real time and describe optimized WaveNet inference kernels on both CPU and GPU that achieve up to 400x speedups over existing implementations.", "bibtex": "@InProceedings{pmlr-v70-arik17a,\n title = \t {Deep Voice: Real-time Neural Text-to-Speech},\n author = {Sercan {\\\"O}. 
Ar{\\i}k and Mike Chrzanowski and Adam Coates and Gregory Diamos and Andrew Gibiansky and Yongguo Kang and Xian Li and John Miller and Andrew Ng and Jonathan Raiman and Shubho Sengupta and Mohammad Shoeybi},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {195--204},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/arik17a/arik17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/arik17a.html},\n abstract = \t {We present Deep Voice, a production-quality text-to-speech system constructed entirely from deep neural networks. Deep Voice lays the groundwork for truly end-to-end neural speech synthesis. The system comprises five major building blocks: a segmentation model for locating phoneme boundaries, a grapheme-to-phoneme conversion model, a phoneme duration prediction model, a fundamental frequency prediction model, and an audio synthesis model. For the segmentation model, we propose a novel way of performing phoneme boundary detection with deep neural networks using connectionist temporal classification (CTC) loss. For the audio synthesis model, we implement a variant of WaveNet that requires fewer parameters and trains faster than the original. By using a neural network for each component, our system is simpler and more flexible than traditional text-to-speech systems, where each component requires laborious feature engineering and extensive domain expertise. Finally, we show that inference with our system can be performed faster than real time and describe optimized WaveNet inference kernels on both CPU and GPU that achieve up to 400x speedups over existing implementations.}\n}", "pdf": "http://proceedings.mlr.press/v70/arik17a/arik17a.pdf", "supp": "", "pdf_size": 397202, "gs_citation": 877, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18296399576126585694&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Baidu Silicon Valley Artificial Intelligence Lab; Baidu Silicon Valley Artificial Intelligence Lab; Baidu Silicon Valley Artificial Intelligence Lab; Baidu Silicon Valley Artificial Intelligence Lab; Baidu Silicon Valley Artificial Intelligence Lab; Baidu Corporation + Baidu Silicon Valley Artificial Intelligence Lab; Baidu Corporation + Baidu Silicon Valley Artificial Intelligence Lab; Baidu Silicon Valley Artificial Intelligence Lab; Baidu Silicon Valley Artificial Intelligence Lab; Baidu Silicon Valley Artificial Intelligence Lab; Baidu Silicon Valley Artificial Intelligence Lab; Baidu Silicon Valley Artificial Intelligence Lab", "aff_domain": "baidu.com; ; ; ; ; ; ; ; ; ; ; ", "email": "baidu.com; ; ; ; ; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 12, "oa": "https://proceedings.mlr.press/v70/arik17a.html", "aff_unique_index": "0;0;0;0;0;0+0;0+0;0;0;0;0;0", "aff_unique_norm": "Baidu", "aff_unique_dep": "Artificial Intelligence Lab", "aff_unique_url": "https://www.baidu.com", "aff_unique_abbr": "Baidu AI", "aff_campus_unique_index": "0;0;0;0;0;0;0;0;0;0;0;0", "aff_campus_unique": "Silicon Valley;", "aff_country_unique_index": "0;0;0;0;0;1+0;1+0;0;0;0;0;0", "aff_country_unique": "United States;China" }, { "title": "DeepBach: a Steerable Model for Bach Chorales Generation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/731", "id": "731", "author_site": 
"Ga\u00ebtan HADJERES, Fran\u00e7ois Pachet, Frank Nielsen", "author": "Ga\u00ebtan Hadjeres; Fran\u00e7ois Pachet; Frank Nielsen", "abstract": "This paper introduces DeepBach, a graphical model aimed at modeling polyphonic music and specifically hymn-like pieces. We claim that, after being trained on the chorale harmonizations by Johann Sebastian Bach, our model is capable of generating highly convincing chorales in the style of Bach. DeepBach\u2019s strength comes from the use of pseudo-Gibbs sampling coupled with an adapted representation of musical data. This is in contrast with many automatic music composition approaches which tend to compose music sequentially. Our model is also steerable in the sense that a user can constrain the generation by imposing positional constraints such as notes, rhythms or cadences in the generated score. We also provide a plugin on top of the MuseScore music editor making the interaction with DeepBach easy to use.", "bibtex": "@InProceedings{pmlr-v70-hadjeres17a,\n title = \t {{D}eep{B}ach: a Steerable Model for {B}ach Chorales Generation},\n author = {Ga{\\\"e}tan Hadjeres and Fran{\\c{c}}ois Pachet and Frank Nielsen},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1362--1371},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/hadjeres17a/hadjeres17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/hadjeres17a.html},\n abstract = \t {This paper introduces DeepBach, a graphical model aimed at modeling polyphonic music and specifically hymn-like pieces. We claim that, after being trained on the chorale harmonizations by Johann Sebastian Bach, our model is capable of generating highly convincing chorales in the style of Bach. DeepBach\u2019s strength comes from the use of pseudo-Gibbs sampling coupled with an adapted representation of musical data. This is in contrast with many automatic music composition approaches which tend to compose music sequentially. Our model is also steerable in the sense that a user can constrain the generation by imposing positional constraints such as notes, rhythms or cadences in the generated score. 
We also provide a plugin on top of the MuseScore music editor making the interaction with DeepBach easy to use.}\n}", "pdf": "http://proceedings.mlr.press/v70/hadjeres17a/hadjeres17a.pdf", "supp": "", "pdf_size": 1337307, "gs_citation": 651, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4953274680350121270&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "LIP6, Universit \u00b4e Pierre et Marie Curie+Sony CSL, Paris; LIP6, Universit \u00b4e Pierre et Marie Curie+Sony CSL, Paris; Sony CSL, Japan", "aff_domain": "etu.upmc.fr;gmail.com;acm.org", "email": "etu.upmc.fr;gmail.com;acm.org", "github": "", "project": "https://www.youtube.com/watch?v=73WF0M99vlg", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/hadjeres17a.html", "aff_unique_index": "0+1;0+1;1", "aff_unique_norm": "Universit \u00b4e Pierre et Marie Curie;Sony Computer Science Laboratories", "aff_unique_dep": "LIP6;Computer Science Laboratories", "aff_unique_url": "https://www.upmc.fr;https://www.csl.sony.fr", "aff_unique_abbr": "UPMC;Sony CSL", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Paris", "aff_country_unique_index": "0+0;0+0;1", "aff_country_unique": "France;Japan" }, { "title": "Deeply AggreVaTeD: Differentiable Imitation Learning for Sequential Prediction", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/709", "id": "709", "author_site": "Wen Sun, Arun Venkatraman, Geoff Gordon, Byron Boots, Drew Bagnell", "author": "Wen Sun; Arun Venkatraman; Geoffrey J. Gordon; Byron Boots; J. Andrew Bagnell", "abstract": "Recently, researchers have demonstrated state-of-the-art performance on sequential prediction problems using deep neural networks and Reinforcement Learning (RL). For some of these problems, oracles that can demonstrate good performance may be available during training, but are not used by plain RL methods. To take advantage of this extra information, we propose AggreVaTeD, an extension of the Imitation Learning (IL) approach of Ross \\& Bagnell (2014). AggreVaTeD allows us to use expressive differentiable policy representations such as deep networks, while leveraging training-time oracles to achieve faster and more accurate solutions with less training data. Specifically, we present two gradient procedures that can learn neural network policies for several problems, including a sequential prediction task and several high-dimensional robotics control problems. We also provide a comprehensive theoretical study of IL that demonstrates that we can expect up to exponentially-lower sample complexity for learning with AggreVaTeD than with plain RL algorithms. Our results and theory indicate that IL (and AggreVaTeD in particular) can be a more effective strategy for sequential prediction than plain RL.", "bibtex": "@InProceedings{pmlr-v70-sun17d,\n title = \t {Deeply {A}ggre{V}a{T}e{D}: Differentiable Imitation Learning for Sequential Prediction},\n author = {Wen Sun and Arun Venkatraman and Geoffrey J. Gordon and Byron Boots and J. 
Andrew Bagnell},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3309--3318},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/sun17d/sun17d.pdf},\n url = \t {https://proceedings.mlr.press/v70/sun17d.html},\n abstract = \t {Recently, researchers have demonstrated state-of-the-art performance on sequential prediction problems using deep neural networks and Reinforcement Learning (RL). For some of these problems, oracles that can demonstrate good performance may be available during training, but are not used by plain RL methods. To take advantage of this extra information, we propose AggreVaTeD, an extension of the Imitation Learning (IL) approach of Ross \\& Bagnell (2014). AggreVaTeD allows us to use expressive differentiable policy representations such as deep networks, while leveraging training-time oracles to achieve faster and more accurate solutions with less training data. Specifically, we present two gradient procedures that can learn neural network policies for several problems, including a sequential prediction task and several high-dimensional robotics control problems. We also provide a comprehensive theoretical study of IL that demonstrates that we can expect up to exponentially-lower sample complexity for learning with AggreVaTeD than with plain RL algorithms. Our results and theory indicate that IL (and AggreVaTeD in particular) can be a more effective strategy for sequential prediction than plain RL.}\n}", "pdf": "http://proceedings.mlr.press/v70/sun17d/sun17d.pdf", "supp": "", "pdf_size": 738282, "gs_citation": 299, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18079478369913106042&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Robotics Institute, Carnegie Mellon University, USA; Robotics Institute, Carnegie Mellon University, USA; Machine Learning Department, Carnegie Mellon University, USA; College of Computing, Georgia Institute of Technology, USA; Robotics Institute, Carnegie Mellon University, USA", "aff_domain": "cs.cmu.edu; ; ; ; ", "email": "cs.cmu.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/sun17d.html", "aff_unique_index": "0;0;0;1;0", "aff_unique_norm": "Carnegie Mellon University;Georgia Institute of Technology", "aff_unique_dep": "Robotics Institute;College of Computing", "aff_unique_url": "https://www.cmu.edu;https://www.gatech.edu", "aff_unique_abbr": "CMU;Georgia Tech", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Deletion-Robust Submodular Maximization: Data Summarization with \u201cthe Right to be Forgotten\u201d", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/698", "id": "698", "author_site": "Baharan Mirzasoleiman, Amin Karbasi, Andreas Krause", "author": "Baharan Mirzasoleiman; Amin Karbasi; Andreas Krause", "abstract": "How can we summarize a dynamic data stream when elements selected for the summary can be deleted at any time? This is an important challenge in online services, where the users generating the data may decide to exercise their right to restrict the service provider from using (part of) their data due to privacy concerns. 
Motivated by this challenge, we introduce the dynamic deletion-robust submodular maximization problem. We develop the first resilient streaming algorithm, called ROBUST-STREAMING, with a constant factor approximation guarantee to the optimum solution. We evaluate the effectiveness of our approach on several real-world applications, including summarizing (1) streams of geo-coordinates; (2) streams of images; and (3) click-stream log data, consisting of 45 million feature vectors from a news recommendation task.", "bibtex": "@InProceedings{pmlr-v70-mirzasoleiman17a,\n title = \t {Deletion-Robust Submodular Maximization: Data Summarization with ``the Right to be Forgotten''},\n author = {Baharan Mirzasoleiman and Amin Karbasi and Andreas Krause},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2449--2458},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/mirzasoleiman17a/mirzasoleiman17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/mirzasoleiman17a.html},\n abstract = \t {How can we summarize a dynamic data stream when elements selected for the summary can be deleted at any time? This is an important challenge in online services, where the users generating the data may decide to exercise their right to restrict the service provider from using (part of) their data due to privacy concerns. Motivated by this challenge, we introduce the dynamic deletion-robust submodular maximization problem. We develop the first resilient streaming algorithm, called ROBUST-STREAMING, with a constant factor approximation guarantee to the optimum solution. We evaluate the effectiveness of our approach on several real-world applications, including summarizing (1) streams of geo-coordinates; (2) streams of images; and (3) click-stream log data, consisting of 45 million feature vectors from a news recommendation task.}\n}", "pdf": "http://proceedings.mlr.press/v70/mirzasoleiman17a/mirzasoleiman17a.pdf", "supp": "", "pdf_size": 2328108, "gs_citation": 97, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13345570047702423427&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "ETH Zurich, Switzerland; Yale University, New Haven, USA; ETH Zurich, Switzerland", "aff_domain": "inf.ethz.ch; ; ", "email": "inf.ethz.ch; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/mirzasoleiman17a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "ETH Zurich;Yale University", "aff_unique_dep": ";", "aff_unique_url": "https://www.ethz.ch;https://www.yale.edu", "aff_unique_abbr": "ETHZ;Yale", "aff_campus_unique_index": "1", "aff_campus_unique": ";New Haven", "aff_country_unique_index": "0;1;0", "aff_country_unique": "Switzerland;United States" }, { "title": "Delta Networks for Optimized Recurrent Network Computation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/535", "id": "535", "author_site": "Daniel Neil, Jun Lee, Tobi Delbruck, Shih-Chii Liu", "author": "Daniel Neil; Jun Haeng Lee; Tobi Delbruck; Shih-Chii Liu", "abstract": "Many neural networks exhibit stability in their activation patterns over time in response to inputs from sensors operating under real-world conditions.
By capitalizing on this property of natural signals, we propose a Recurrent Neural Network (RNN) architecture called a delta network in which each neuron transmits its value only when the change in its activation exceeds a threshold. The execution of RNNs as delta networks is attractive because their states must be stored and fetched at every timestep, unlike in convolutional neural networks (CNNs). We show that a naive run-time delta network implementation offers modest improvements on the number of memory accesses and computes, but optimized training techniques confer higher accuracy at higher speedup. With these optimizations, we demonstrate a 9X reduction in cost with negligible loss of accuracy for the TIDIGITS audio digit recognition benchmark. Similarly, on the large Wall Street Journal (WSJ) speech recognition benchmark, pretrained networks can also be greatly accelerated as delta networks and trained delta networks show a 5.7x improvement with negligible loss of accuracy. Finally, on an end-to-end CNN-RNN network trained for steering angle prediction in a driving dataset, the RNN cost can be reduced by a substantial 100X.", "bibtex": "@InProceedings{pmlr-v70-neil17a,\n title = \t {Delta Networks for Optimized Recurrent Network Computation},\n author = {Daniel Neil and Jun Haeng Lee and Tobi Delbruck and Shih-Chii Liu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2584--2593},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/neil17a/neil17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/neil17a.html},\n abstract = \t {Many neural networks exhibit stability in their activation patterns over time in response to inputs from sensors operating under real-world conditions. By capitalizing on this property of natural signals, we propose a Recurrent Neural Network (RNN) architecture called a delta network in which each neuron transmits its value only when the change in its activation exceeds a threshold. The execution of RNNs as delta networks is attractive because their states must be stored and fetched at every timestep, unlike in convolutional neural networks (CNNs). We show that a naive run-time delta network implementation offers modest improvements on the number of memory accesses and computes, but optimized training techniques confer higher accuracy at higher speedup. With these optimizations, we demonstrate a 9X reduction in cost with negligible loss of accuracy for the TIDIGITS audio digit recognition benchmark. Similarly, on the large Wall Street Journal (WSJ) speech recognition benchmark, pretrained networks can also be greatly accelerated as delta networks and trained delta networks show a 5.7x improvement with negligible loss of accuracy. 
Finally, on an end-to-end CNN-RNN network trained for steering angle prediction in a driving dataset, the RNN cost can be reduced by a substantial 100X.}\n}", "pdf": "http://proceedings.mlr.press/v70/neil17a/neil17a.pdf", "supp": "", "pdf_size": 877727, "gs_citation": 82, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6466382683312826120&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Institute of Neuroinformatics, UZH and ETH Zurich, Zurich, Switzerland; Samsung Advanced Institute of Technology, Samsung Electronics, Suwon-Si, Republic of Korea; Institute of Neuroinformatics, UZH and ETH Zurich, Zurich, Switzerland; Institute of Neuroinformatics, UZH and ETH Zurich, Zurich, Switzerland", "aff_domain": "ini.ethz.ch; ; ;ini.ethz.ch", "email": "ini.ethz.ch; ; ;ini.ethz.ch", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/neil17a.html", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "University of Zurich and ETH Zurich;Samsung", "aff_unique_dep": "Institute of Neuroinformatics;Samsung Advanced Institute of Technology", "aff_unique_url": "https://www.neuro.ethz.ch/;https://www.sait.samsung.com", "aff_unique_abbr": "UZH & ETH;SAIT", "aff_campus_unique_index": "0;1;0;0", "aff_campus_unique": "Zurich;Suwon-Si", "aff_country_unique_index": "0;1;0;0", "aff_country_unique": "Switzerland;South Korea" }, { "title": "Density Level Set Estimation on Manifolds with DBSCAN", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/552", "id": "552", "author": "Heinrich Jiang", "abstract": "We show that DBSCAN can estimate the connected components of the $\\lambda$-density level set $\\{ x : f(x) \\ge \\lambda\\}$ given $n$ i.i.d. samples from an unknown density $f$. We characterize the regularity of the level set boundaries using parameter $\\beta > 0$ and analyze the estimation error under the Hausdorff metric. When the data lies in $\\mathbb{R}^D$ we obtain a rate of $\\widetilde{O}(n^{-1/(2\\beta + D)})$, which matches known lower bounds up to logarithmic factors. When the data lies on an embedded unknown $d$-dimensional manifold in $\\mathbb{R}^D$, then we obtain a rate of $\\widetilde{O}(n^{-1/(2\\beta + d\\cdot \\max\\{1, \\beta \\})})$. Finally, we provide adaptive parameter tuning in order to attain these rates with no a priori knowledge of the intrinsic dimension, density, or $\\beta$.", "bibtex": "@InProceedings{pmlr-v70-jiang17a,\n title = \t {Density Level Set Estimation on Manifolds with {DBSCAN}},\n author = {Heinrich Jiang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1684--1693},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/jiang17a/jiang17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/jiang17a.html},\n abstract = \t {We show that DBSCAN can estimate the connected components of the $\\lambda$-density level set $\\{ x : f(x) \\ge \\lambda\\}$ given $n$ i.i.d. samples from an unknown density $f$. We characterize the regularity of the level set boundaries using parameter $\\beta > 0$ and analyze the estimation error under the Hausdorff metric. When the data lies in $\\mathbb{R}^D$ we obtain a rate of $\\widetilde{O}(n^{-1/(2\\beta + D)})$, which matches known lower bounds up to logarithmic factors. 
When the data lies on an embedded unknown $d$-dimensional manifold in $\\mathbb{R}^D$, then we obtain a rate of $\\widetilde{O}(n^{-1/(2\\beta + d\\cdot \\max\\{1, \\beta \\})})$. Finally, we provide adaptive parameter tuning in order to attain these rates with no a priori knowledge of the intrinsic dimension, density, or $\\beta$.}\n}", "pdf": "http://proceedings.mlr.press/v70/jiang17a/jiang17a.pdf", "supp": "", "pdf_size": 274232, "gs_citation": 36, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14234691315790040201&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Google", "aff_domain": "gmail.com", "email": "gmail.com", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v70/jiang17a.html", "aff_unique_index": "0", "aff_unique_norm": "Google", "aff_unique_dep": "Google", "aff_unique_url": "https://www.google.com", "aff_unique_abbr": "Google", "aff_campus_unique_index": "0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "title": "Depth-Width Tradeoffs in Approximating Natural Functions with Neural Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/593", "id": "593", "author_site": "Itay Safran, Ohad Shamir", "author": "Itay Safran; Ohad Shamir", "abstract": "We provide several new depth-based separation results for feed-forward neural networks, proving that various types of simple and natural functions can be better approximated using deeper networks than shallower ones, even if the shallower networks are much larger. This includes indicators of balls and ellipses; non-linear functions which are radial with respect to the $L_1$ norm; and smooth non-linear functions. We also show that these gaps can be observed experimentally: Increasing the depth indeed allows better learning than increasing width, when training neural networks to learn an indicator of a unit ball.", "bibtex": "@InProceedings{pmlr-v70-safran17a,\n title = \t {Depth-Width Tradeoffs in Approximating Natural Functions with Neural Networks},\n author = {Itay Safran and Ohad Shamir},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2979--2987},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/safran17a/safran17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/safran17a.html},\n abstract = \t {We provide several new depth-based separation results for feed-forward neural networks, proving that various types of simple and natural functions can be better approximated using deeper networks than shallower ones, even if the shallower networks are much larger. This includes indicators of balls and ellipses; non-linear functions which are radial with respect to the $L_1$ norm; and smooth non-linear functions. 
We also show that these gaps can be observed experimentally: Increasing the depth indeed allows better learning than increasing width, when training neural networks to learn an indicator of a unit ball.}\n}", "pdf": "http://proceedings.mlr.press/v70/safran17a/safran17a.pdf", "supp": "", "pdf_size": 430345, "gs_citation": 210, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1660367906649709066&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Weizmann Institute of Science, Rehovot, Israel; Weizmann Institute of Science, Rehovot, Israel", "aff_domain": "weizmann.ac.il;weizmann.ac.il", "email": "weizmann.ac.il;weizmann.ac.il", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/safran17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Weizmann Institute of Science", "aff_unique_dep": "", "aff_unique_url": "https://www.weizmann.org.il", "aff_unique_abbr": "Weizmann", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Rehovot", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "title": "Deriving Neural Architectures from Sequence and Graph Kernels", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/797", "id": "797", "author_site": "Tao Lei, Wengong Jin, Regina Barzilay, Tommi Jaakkola", "author": "Tao Lei; Wengong Jin; Regina Barzilay; Tommi Jaakkola", "abstract": "The design of neural architectures for structured objects is typically guided by experimental insights rather than a formal process. In this work, we appeal to kernels over combinatorial structures, such as sequences and graphs, to derive appropriate neural operations. We introduce a class of deep recurrent neural operations and formally characterize their associated kernel spaces. Our recurrent modules compare the input to virtual reference objects (cf. filters in CNN) via the kernels. Similar to traditional neural operations, these reference objects are parameterized and directly optimized in end-to-end training. We empirically evaluate the proposed class of neural architectures on standard applications such as language modeling and molecular graph regression, achieving state-of-the-art results across these applications.", "bibtex": "@InProceedings{pmlr-v70-lei17a,\n title = \t {Deriving Neural Architectures from Sequence and Graph Kernels},\n author = {Tao Lei and Wengong Jin and Regina Barzilay and Tommi Jaakkola},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2024--2033},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/lei17a/lei17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/lei17a.html},\n abstract = \t {The design of neural architectures for structured objects is typically guided by experimental insights rather than a formal process. In this work, we appeal to kernels over combinatorial structures, such as sequences and graphs, to derive appropriate neural operations. We introduce a class of deep recurrent neural operations and formally characterize their associated kernel spaces. Our recurrent modules compare the input to virtual reference objects (cf. filters in CNN) via the kernels. Similar to traditional neural operations, these reference objects are parameterized and directly optimized in end-to-end training. 
We empirically evaluate the proposed class of neural architectures on standard applications such as language modeling and molecular graph regression, achieving state-of-the-art results across these applications.}\n}", "pdf": "http://proceedings.mlr.press/v70/lei17a/lei17a.pdf", "supp": "", "pdf_size": 1035253, "gs_citation": 160, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8156858214590979113&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "MIT Computer Science & Artificial Intelligence Laboratory; MIT Computer Science & Artificial Intelligence Laboratory; MIT Computer Science & Artificial Intelligence Laboratory; MIT Computer Science & Artificial Intelligence Laboratory", "aff_domain": "csail.mit.edu;csail.mit.edu; ; ", "email": "csail.mit.edu;csail.mit.edu; ; ", "github": "https://github.com/taolei87/icml17_knn", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/lei17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Computer Science & Artificial Intelligence Laboratory", "aff_unique_url": "https://www.csail.mit.edu", "aff_unique_abbr": "MIT CSAIL", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Developing Bug-Free Machine Learning Systems With Formal Mathematics", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/849", "id": "849", "author_site": "Daniel Selsam, Percy Liang, David L Dill", "author": "Daniel Selsam; Percy Liang; David L. Dill", "abstract": "Noisy data, non-convex objectives, model misspecification, and numerical instability can all cause undesired behaviors in machine learning systems. As a result, detecting actual implementation errors can be extremely difficult. We demonstrate a methodology in which developers use an interactive proof assistant to both implement their system and to state a formal theorem defining what it means for their system to be correct. The process of proving this theorem interactively in the proof assistant exposes all implementation errors since any error in the program would cause the proof to fail. As a case study, we implement a new system, Certigrad, for optimizing over stochastic computation graphs, and we generate a formal (i.e. machine-checkable) proof that the gradients sampled by the system are unbiased estimates of the true mathematical gradients. We train a variational autoencoder using Certigrad and find the performance comparable to training the same model in TensorFlow.", "bibtex": "@InProceedings{pmlr-v70-selsam17a,\n title = \t {Developing Bug-Free Machine Learning Systems With Formal Mathematics},\n author = {Daniel Selsam and Percy Liang and David L. Dill},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3047--3056},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/selsam17a/selsam17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/selsam17a.html},\n abstract = \t {Noisy data, non-convex objectives, model misspecification, and numerical instability can all cause undesired behaviors in machine learning systems. As a result, detecting actual implementation errors can be extremely difficult. 
We demonstrate a methodology in which developers use an interactive proof assistant to both implement their system and to state a formal theorem defining what it means for their system to be correct. The process of proving this theorem interactively in the proof assistant exposes all implementation errors since any error in the program would cause the proof to fail. As a case study, we implement a new system, Certigrad, for optimizing over stochastic computation graphs, and we generate a formal (i.e. machine-checkable) proof that the gradients sampled by the system are unbiased estimates of the true mathematical gradients. We train a variational autoencoder using Certigrad and find the performance comparable to training the same model in TensorFlow.}\n}", "pdf": "http://proceedings.mlr.press/v70/selsam17a/selsam17a.pdf", "supp": "", "pdf_size": 323580, "gs_citation": 67, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18283001560879550934&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Stanford University; Stanford University; Stanford University", "aff_domain": "stanford.edu; ; ", "email": "stanford.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/selsam17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Device Placement Optimization with Reinforcement Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/888", "id": "888", "author_site": "Azalia Mirhoseini, Hieu Pham, Quoc Le, benoit steiner, Mohammad Norouzi, Rasmus Larsen, Yuefeng Zhou, Naveen Kumar, Samy Bengio, Jeff Dean", "author": "Azalia Mirhoseini; Hieu Pham; Quoc V. Le; Benoit Steiner; Rasmus Larsen; Yuefeng Zhou; Naveen Kumar; Mohammad Norouzi; Samy Bengio; Jeff Dean", "abstract": "The past few years have witnessed a growth in size and computational requirements for training and inference with neural networks. Currently, a common approach to address these requirements is to use a heterogeneous distributed environment with a mixture of hardware devices such as CPUs and GPUs. Importantly, the decision of placing parts of the neural models on devices is often made by human experts based on simple heuristics and intuitions. In this paper, we propose a method which learns to optimize device placement for TensorFlow computational graphs. Key to our method is the use of a sequence-to-sequence model to predict which subsets of operations in a TensorFlow graph should run on which of the available devices. The execution time of the predicted placements is then used as the reward signal to optimize the parameters of the sequence-to-sequence model. Our main result is that on Inception-V3 for ImageNet classification, and on RNN LSTM, for language modeling and neural machine translation, our model finds non-trivial device placements that outperform hand-crafted heuristics and traditional algorithmic methods.", "bibtex": "@InProceedings{pmlr-v70-mirhoseini17a,\n title = \t {Device Placement Optimization with Reinforcement Learning},\n author = {Azalia Mirhoseini and Hieu Pham and Quoc V.
Le and Benoit Steiner and Rasmus Larsen and Yuefeng Zhou and Naveen Kumar and Mohammad Norouzi and Samy Bengio and Jeff Dean},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2430--2439},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/mirhoseini17a/mirhoseini17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/mirhoseini17a.html},\n abstract = \t {The past few years have witnessed a growth in size and computational requirements for training and inference with neural networks. Currently, a common approach to address these requirements is to use a heterogeneous distributed environment with a mixture of hardware devices such as CPUs and GPUs. Importantly, the decision of placing parts of the neural models on devices is often made by human experts based on simple heuristics and intuitions. In this paper, we propose a method which learns to optimize device placement for TensorFlow computational graphs. Key to our method is the use of a sequence-to-sequence model to predict which subsets of operations in a TensorFlow graph should run on which of the available devices. The execution time of the predicted placements is then used as the reward signal to optimize the parameters of the sequence-to-sequence model. Our main result is that on Inception-V3 for ImageNet classification, and on RNN LSTM, for language modeling and neural machine translation, our model finds non-trivial device placements that outperform hand-crafted heuristics and traditional algorithmic methods.}\n}", "pdf": "http://proceedings.mlr.press/v70/mirhoseini17a/mirhoseini17a.pdf", "supp": "", "pdf_size": 453248, "gs_citation": 556, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8945504786472306133&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 22, "aff": "Google Brain; Google Brain; Google Brain; Google Brain; Google Brain; Google Brain; Google; Google Brain; Google Brain; Google Brain", "aff_domain": "google.com;google.com; ; ; ; ; ;google.com; ; ", "email": "google.com;google.com; ; ; ; ; ;google.com; ; ", "github": "", "project": "", "author_num": 10, "oa": "https://proceedings.mlr.press/v70/mirhoseini17a.html", "aff_unique_index": "0;0;0;0;0;0;0;0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Brain", "aff_unique_url": "https://brain.google.com", "aff_unique_abbr": "Google Brain", "aff_campus_unique_index": "0;0;0;0;0;0;0;0;0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Diameter-Based Active Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/804", "id": "804", "author_site": "Christopher Tosh, Sanjoy Dasgupta", "author": "Christopher Tosh; Sanjoy Dasgupta", "abstract": "To date, the tightest upper and lower-bounds for the active learning of general concept classes have been in terms of a parameter of the learning problem called the splitting index.
We provide, for the first time, an efficient algorithm that is able to realize this upper bound, and we empirically demonstrate its good performance.", "bibtex": "@InProceedings{pmlr-v70-tosh17a,\n title = \t {Diameter-Based Active Learning},\n author = {Christopher Tosh and Sanjoy Dasgupta},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3444--3452},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/tosh17a/tosh17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/tosh17a.html},\n abstract = \t {To date, the tightest upper and lower-bounds for the active learning of general concept classes have been in terms of a parameter of the learning problem called the splitting index. We provide, for the first time, an efficient algorithm that is able to realize this upper bound, and we empirically demonstrate its good performance.}\n}", "pdf": "http://proceedings.mlr.press/v70/tosh17a/tosh17a.pdf", "supp": "", "pdf_size": 1065745, "gs_citation": 35, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15658846616137070186&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science and Engineering, UC San Diego, La Jolla, CA, USA; Department of Computer Science and Engineering, UC San Diego, La Jolla, CA, USA", "aff_domain": "cs.ucsd.edu;cs.ucsd.edu", "email": "cs.ucsd.edu;cs.ucsd.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/tosh17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, San Diego", "aff_unique_dep": "Department of Computer Science and Engineering", "aff_unique_url": "https://www.ucsd.edu", "aff_unique_abbr": "UCSD", "aff_campus_unique_index": "0;0", "aff_campus_unique": "La Jolla", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Dictionary Learning Based on Sparse Distribution Tomography", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/662", "id": "662", "author_site": "Pedram Pad, Farnood Salehi, L. Elisa Celis, Patrick Thiran, Michael Unser", "author": "Pedram Pad; Farnood Salehi; Elisa Celis; Patrick Thiran; Michael Unser", "abstract": "We propose a new statistical dictionary learning algorithm for sparse signals that is based on an $\\alpha$-stable innovation model. The parameters of the underlying model\u2014that is, the atoms of the dictionary, the sparsity index $\\alpha$ and the dispersion of the transform-domain coefficients\u2014are recovered using a new type of probability distribution tomography. Specifically, we drive our estimator with a series of random projections of the data, which results in an efficient algorithm. Moreover, since the projections are achieved using linear combinations, we can invoke the generalized central limit theorem to justify the use of our method for sparse signals that are not necessarily $\\alpha$-stable. We evaluate our algorithm by performing two types of experiments: image in-painting and image denoising. In both cases, we find that our approach is competitive with state-of-the-art dictionary learning techniques. Beyond the algorithm itself, two aspects of this study are interesting in their own right. 
The first is our statistical formulation of the problem, which unifies the topics of dictionary learning and independent component analysis. The second is a generalization of a classical theorem about isometries of $\\ell_p$-norms that constitutes the foundation of our approach.", "bibtex": "@InProceedings{pmlr-v70-pad17a,\n title = \t {Dictionary Learning Based on Sparse Distribution Tomography},\n author = {Pedram Pad and Farnood Salehi and Elisa Celis and Patrick Thiran and Michael Unser},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2731--2740},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/pad17a/pad17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/pad17a.html},\n abstract = \t {We propose a new statistical dictionary learning algorithm for sparse signals that is based on an $\\alpha$-stable innovation model. The parameters of the underlying model\u2014that is, the atoms of the dictionary, the sparsity index $\\alpha$ and the dispersion of the transform-domain coefficients\u2014are recovered using a new type of probability distribution tomography. Specifically, we drive our estimator with a series of random projections of the data, which results in an efficient algorithm. Moreover, since the projections are achieved using linear combinations, we can invoke the generalized central limit theorem to justify the use of our method for sparse signals that are not necessarily $\\alpha$-stable. We evaluate our algorithm by performing two types of experiments: image in-painting and image denoising. In both cases, we find that our approach is competitive with state-of-the-art dictionary learning techniques. Beyond the algorithm itself, two aspects of this study are interesting in their own right. The first is our statistical formulation of the problem, which unifies the topics of dictionary learning and independent component analysis. 
The second is a generalization of a classical theorem about isometries of $\\ell_p$-norms that constitutes the foundation of our approach.}\n}", "pdf": "http://proceedings.mlr.press/v70/pad17a/pad17a.pdf", "supp": "", "pdf_size": 658579, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10778292886045644146&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Biomedical Imaging Group, EPFL, Lausanne, Switzerland+Computer Communications and Applications Laboratory 3, EPFL, Lausanne, Switzerland; Computer Communications and Applications Laboratory 3, EPFL, Lausanne, Switzerland; Computer Communications and Applications Laboratory 3, EPFL, Lausanne, Switzerland; Computer Communications and Applications Laboratory 3, EPFL, Lausanne, Switzerland; Biomedical Imaging Group, EPFL, Lausanne, Switzerland", "aff_domain": "epfl.ch; ; ; ; ", "email": "epfl.ch; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/pad17a.html", "aff_unique_index": "0+0;0;0;0;0", "aff_unique_norm": "EPFL", "aff_unique_dep": "Biomedical Imaging Group", "aff_unique_url": "https://www.epfl.ch", "aff_unique_abbr": "EPFL", "aff_campus_unique_index": "0+0;0;0;0;0", "aff_campus_unique": "Lausanne", "aff_country_unique_index": "0+0;0;0;0;0", "aff_country_unique": "Switzerland" }, { "title": "Differentiable Programs with Neural Libraries", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/784", "id": "784", "author_site": "Alex Gaunt, Marc Brockschmidt, Nate Kushman, Daniel Tarlow", "author": "Alexander L. Gaunt; Marc Brockschmidt; Nate Kushman; Daniel Tarlow", "abstract": "We develop a framework for combining differentiable programming languages with neural networks. Using this framework we create end-to-end trainable systems that learn to write interpretable algorithms with perceptual components. We explore the benefits of inductive biases for strong generalization and modularity that come from the program-like structure of our models. In particular, modularity allows us to learn a library of (neural) functions which grows and improves as more tasks are solved. Empirically, we show that this leads to lifelong learning systems that transfer knowledge to new tasks more effectively than baselines.", "bibtex": "@InProceedings{pmlr-v70-gaunt17a,\n title = \t {Differentiable Programs with Neural Libraries},\n author = {Alexander L. Gaunt and Marc Brockschmidt and Nate Kushman and Daniel Tarlow},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1213--1222},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/gaunt17a/gaunt17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/gaunt17a.html},\n abstract = \t {We develop a framework for combining differentiable programming languages with neural networks. Using this framework we create end-to-end trainable systems that learn to write interpretable algorithms with perceptual components. We explore the benefits of inductive biases for strong generalization and modularity that come from the program-like structure of our models. In particular, modularity allows us to learn a library of (neural) functions which grows and improves as more tasks are solved. 
Empirically, we show that this leads to lifelong learning systems that transfer knowledge to new tasks more effectively than baselines.}\n}", "pdf": "http://proceedings.mlr.press/v70/gaunt17a/gaunt17a.pdf", "supp": "", "pdf_size": 2386613, "gs_citation": 75, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3778400382790148536&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Microsoft Research, Cambridge, UK; Microsoft Research, Cambridge, UK; Microsoft Research, Cambridge, UK; Google Brain, Montr\u00e9al, Canada", "aff_domain": "microsoft.com; ; ; ", "email": "microsoft.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/gaunt17a.html", "aff_unique_index": "0;0;0;1", "aff_unique_norm": "Microsoft;Google", "aff_unique_dep": "Microsoft Research;Google Brain", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://brain.google.com", "aff_unique_abbr": "MSR;Google Brain", "aff_campus_unique_index": "0;0;0;1", "aff_campus_unique": "Cambridge;Montr\u00e9al", "aff_country_unique_index": "0;0;0;1", "aff_country_unique": "United Kingdom;Canada" }, { "title": "Differentially Private Chi-squared Test by Unit Circle Mechanism", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/847", "id": "847", "author_site": "Kazuya Kakizaki, Kazuto Fukuchi, Jun Sakuma", "author": "Kazuya Kakizaki; Kazuto Fukuchi; Jun Sakuma", "abstract": "This paper develops differentially private mechanisms for $\\chi^2$ test of independence. While existing works put their effort into properly controlling the type-I error, in addition to that, we investigate the type-II error of differentially private mechanisms. Based on the analysis, we present unit circle mechanism: a novel differentially private mechanism based on the geometrical property of the test statistics. Compared to existing output perturbation mechanisms, our mechanism improves the dominated term of the type-II error from $O(1)$ to $O(\\exp(-\\sqrt{N}))$ where $N$ is the sample size. Furthermore, we introduce novel procedures for multiple $\\chi^2$ tests by incorporating the unit circle mechanism into the sparse vector technique and the exponential mechanism. These procedures can control the family-wise error rate (FWER) properly, which has never been attained by existing mechanisms.", "bibtex": "@InProceedings{pmlr-v70-kakizaki17a,\n title = \t {Differentially Private Chi-squared Test by Unit Circle Mechanism},\n author = {Kazuya Kakizaki and Kazuto Fukuchi and Jun Sakuma},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1761--1770},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/kakizaki17a/kakizaki17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/kakizaki17a.html},\n abstract = \t {This paper develops differentially private mechanisms for $\\chi^2$ test of independence. While existing works put their effort into properly controlling the type-I error, in addition to that, we investigate the type-II error of differentially private mechanisms. Based on the analysis, we present unit circle mechanism: a novel differentially private mechanism based on the geometrical property of the test statistics. 
Compared to existing output perturbation mechanisms, our mechanism improves the dominated term of the type-II error from $O(1)$ to $O(\\exp(-\\sqrt{N}))$ where $N$ is the sample size. Furthermore, we introduce novel procedures for multiple $\\chi^2$ tests by incorporating the unit circle mechanism into the sparse vector technique and the exponential mechanism. These procedures can control the family-wise error rate (FWER) properly, which has never been attained by existing mechanisms.}\n}", "pdf": "http://proceedings.mlr.press/v70/kakizaki17a/kakizaki17a.pdf", "supp": "", "pdf_size": 1255969, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12925966056432560764&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, University of Tsukuba; JST CREST; RIKEN Center for Advanced Intelligence Project", "aff_domain": "mdl.cs.tsukuba.ac.jp;mdl.cs.tsukuba.ac.jp;cs.tsukuba.ac.jp", "email": "mdl.cs.tsukuba.ac.jp;mdl.cs.tsukuba.ac.jp;cs.tsukuba.ac.jp", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/kakizaki17a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "University of Tsukuba;Japan Science and Technology Agency;RIKEN", "aff_unique_dep": "Department of Computer Science;CREST;Center for Advanced Intelligence Project", "aff_unique_url": "https://www.tsukuba.ac.jp;https://www.jst.go.jp;https://www.riken.jp/en/", "aff_unique_abbr": "UT;JST;RIKEN", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Japan" }, { "title": "Differentially Private Clustering in High-Dimensional Euclidean Spaces", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/756", "id": "756", "author_site": "Nina Balcan, Travis Dick, Yingyu Liang, Wenlong Mou, Hongyang Zhang", "author": "Maria-Florina Balcan; Travis Dick; Yingyu Liang; Wenlong Mou; Hongyang Zhang", "abstract": "We study the problem of clustering sensitive data while preserving the privacy of individuals represented in the dataset, which has broad applications in practical machine learning and data analysis tasks. Although the problem has been widely studied in the context of low-dimensional, discrete spaces, much remains unknown concerning private clustering in high-dimensional Euclidean spaces $\\mathbb{R}^d$. In this work, we give differentially private and efficient algorithms achieving strong guarantees for $k$-means and $k$-median clustering when $d=\\Omega(\\mathsf{polylog}(n))$. Our algorithm achieves clustering loss at most $\\log^3(n)\\mathsf{OPT}+\\mathsf{poly}(\\log n,d,k)$, advancing the state-of-the-art result of $\\sqrt{d}\\mathsf{OPT}+\\mathsf{poly}(\\log n,d^d,k^d)$. We also study the case where the data points are $s$-sparse and show that the clustering loss can scale logarithmically with $d$, i.e., $\\log^3(n)\\mathsf{OPT}+\\mathsf{poly}(\\log n,\\log d,k,s)$. 
Experiments on both synthetic and real datasets verify the effectiveness of the proposed method.", "bibtex": "@InProceedings{pmlr-v70-balcan17a,\n title = \t {Differentially Private Clustering in High-Dimensional {E}uclidean Spaces},\n author = {Maria-Florina Balcan and Travis Dick and Yingyu Liang and Wenlong Mou and Hongyang Zhang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {322--331},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/balcan17a/balcan17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/balcan17a.html},\n abstract = \t {We study the problem of clustering sensitive data while preserving the privacy of individuals represented in the dataset, which has broad applications in practical machine learning and data analysis tasks. Although the problem has been widely studied in the context of low-dimensional, discrete spaces, much remains unknown concerning private clustering in high-dimensional Euclidean spaces $\\mathbb{R}^d$. In this work, we give differentially private and efficient algorithms achieving strong guarantees for $k$-means and $k$-median clustering when $d=\\Omega(\\mathsf{polylog}(n))$. Our algorithm achieves clustering loss at most $\\log^3(n)\\mathsf{OPT}+\\mathsf{poly}(\\log n,d,k)$, advancing the state-of-the-art result of $\\sqrt{d}\\mathsf{OPT}+\\mathsf{poly}(\\log n,d^d,k^d)$. We also study the case where the data points are $s$-sparse and show that the clustering loss can scale logarithmically with $d$, i.e., $\\log^3(n)\\mathsf{OPT}+\\mathsf{poly}(\\log n,\\log d,k,s)$. Experiments on both synthetic and real datasets verify the effectiveness of the proposed method.}\n}", "pdf": "http://proceedings.mlr.press/v70/balcan17a/balcan17a.pdf", "supp": "", "pdf_size": 328368, "gs_citation": 106, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7054980736536914784&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Carnegie Mellon University; Carnegie Mellon University; Princeton University; Peking University; Carnegie Mellon University", "aff_domain": "pku.edu.cn; ; ; ; ", "email": "pku.edu.cn; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/balcan17a.html", "aff_unique_index": "0;0;1;2;0", "aff_unique_norm": "Carnegie Mellon University;Princeton University;Peking University", "aff_unique_dep": ";;", "aff_unique_url": "https://www.cmu.edu;https://www.princeton.edu;http://www.pku.edu.cn", "aff_unique_abbr": "CMU;Princeton;Peking U", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;1;0", "aff_country_unique": "United States;China" }, { "title": "Differentially Private Learning of Undirected Graphical Models Using Collective Graphical Models", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/612", "id": "612", "author_site": "Garrett Bernstein, Ryan McKenna, Tao Sun, Daniel Sheldon, Michael Hay, Gerome Miklau", "author": "Garrett Bernstein; Ryan McKenna; Tao Sun; Daniel Sheldon; Michael Hay; Gerome Miklau", "abstract": "We investigate the problem of learning discrete graphical models in a differentially private way. 
Approaches to this problem range from privileged algorithms that conduct learning completely behind the privacy barrier to schemes that release private summary statistics paired with algorithms to learn parameters from those statistics. We show that the approach of releasing noisy sufficient statistics using the Laplace mechanism achieves a good trade-off between privacy, utility, and practicality. A naive learning algorithm that uses the noisy sufficient statistics \u201cas is\u201d outperforms general-purpose differentially private learning algorithms. However, it has three limitations: it ignores knowledge about the data generating process, rests on uncertain theoretical foundations, and exhibits certain pathologies. We develop a more principled approach that applies the formalism of collective graphical models to perform inference over the true sufficient statistics within an expectation-maximization framework. We show that this learns better models than competing approaches on both synthetic data and on real human mobility data used as a case study.", "bibtex": "@InProceedings{pmlr-v70-bernstein17a,\n title = \t {Differentially Private Learning of Undirected Graphical Models Using Collective Graphical Models},\n author = {Garrett Bernstein and Ryan McKenna and Tao Sun and Daniel Sheldon and Michael Hay and Gerome Miklau},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {478--487},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/bernstein17a/bernstein17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/bernstein17a.html},\n abstract = \t {We investigate the problem of learning discrete graphical models in a differentially private way. Approaches to this problem range from privileged algorithms that conduct learning completely behind the privacy barrier to schemes that release private summary statistics paired with algorithms to learn parameters from those statistics. We show that the approach of releasing noisy sufficient statistics using the Laplace mechanism achieves a good trade-off between privacy, utility, and practicality. A naive learning algorithm that uses the noisy sufficient statistics \u201cas is\u201d outperforms general-purpose differentially private learning algorithms. However, it has three limitations: it ignores knowledge about the data generating process, rests on uncertain theoretical foundations, and exhibits certain pathologies. We develop a more principled approach that applies the formalism of collective graphical models to perform inference over the true sufficient statistics within an expectation-maximization framework. 
We show that this learns better models than competing approaches on both synthetic data and on real human mobility data used as a case study.}\n}", "pdf": "http://proceedings.mlr.press/v70/bernstein17a/bernstein17a.pdf", "supp": "", "pdf_size": 4826201, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4161164035140532867&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "University of Massachusetts Amherst; University of Massachusetts Amherst; University of Massachusetts Amherst; University of Massachusetts Amherst + Mount Holyoke College; Colgate University; University of Massachusetts Amherst", "aff_domain": "cs.umass.edu; ; ; ; ; ", "email": "cs.umass.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/bernstein17a.html", "aff_unique_index": "0;0;0;0+1;2;0", "aff_unique_norm": "University of Massachusetts Amherst;Mount Holyoke College;Colgate University", "aff_unique_dep": ";;", "aff_unique_url": "https://www.umass.edu;https://www.mtholyoke.edu;https://www.colgate.edu", "aff_unique_abbr": "UMass Amherst;MHC;Colgate", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Amherst;", "aff_country_unique_index": "0;0;0;0+0;0;0", "aff_country_unique": "United States" }, { "title": "Differentially Private Ordinary Least Squares", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/611", "id": "611", "author": "Or Sheffet", "abstract": "Linear regression is one of the most prevalent techniques in machine learning; however, it is also common to use linear regression for its explanatory capabilities rather than label prediction. Ordinary Least Squares (OLS) is often used in statistics to establish a correlation between an attribute (e.g. gender) and a label (e.g. income) in the presence of other (potentially correlated) features. OLS assumes a particular model that randomly generates the data, and derives t-values \u2014 representing the likelihood of each real value to be the true correlation. Using t-values, OLS can release a confidence interval, which is an interval on the reals that is likely to contain the true correlation; and when this interval does not intersect the origin, we can reject the null hypothesis as it is likely that the true correlation is non-zero. Our work aims at achieving similar guarantees on data under differentially private estimators. 
First, we show that for well-spread data, the Gaussian Johnson-Lindenstrauss Transform (JLT) gives a very good approximation of t-values; secondly, when JLT approximates Ridge regression (linear regression with $l_2$-regularization) we derive, under certain conditions, confidence intervals using the projected data; lastly, we derive, under different conditions, confidence intervals for the \u201cAnalyze Gauss\u201d algorithm (Dwork et al 2014).", "bibtex": "@InProceedings{pmlr-v70-sheffet17a,\n title = \t {Differentially Private Ordinary Least Squares},\n author = {Or Sheffet},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3105--3114},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/sheffet17a/sheffet17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/sheffet17a.html},\n abstract = \t {Linear regression is one of the most prevalent techniques in machine learning; however, it is also common to use linear regression for its explanatory capabilities rather than label prediction. Ordinary Least Squares (OLS) is often used in statistics to establish a correlation between an attribute (e.g. gender) and a label (e.g. income) in the presence of other (potentially correlated) features. OLS assumes a particular model that randomly generates the data, and derives t-values \u2014 representing the likelihood of each real value to be the true correlation. Using t-values, OLS can release a confidence interval, which is an interval on the reals that is likely to contain the true correlation; and when this interval does not intersect the origin, we can reject the null hypothesis as it is likely that the true correlation is non-zero. Our work aims at achieving similar guarantees on data under differentially private estimators. 
First, we show that for well-spread data, the Gaussian Johnson-Lindenstrauss Transform (JLT) gives a very good approximation of t-values; secondly, when JLT approximates Ridge regression (linear regression with $l_2$-regularization) we derive, under certain conditions, confidence intervals using the projected data; lastly, we derive, under different conditions, confidence intervals for the \u201cAnalyze Gauss\u201d algorithm (Dwork et al 2014).}\n}", "pdf": "http://proceedings.mlr.press/v70/sheffet17a/sheffet17a.pdf", "supp": "", "pdf_size": 686622, "gs_citation": 155, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3491806797045984343&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Computing Science Dept., University of Alberta, Edmonton AB, Canada + Harvard University", "aff_domain": "ualberta.ca", "email": "ualberta.ca", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v70/sheffet17a.html", "aff_unique_index": "0+1", "aff_unique_norm": "University of Alberta;Harvard University", "aff_unique_dep": "Computing Science Dept.;", "aff_unique_url": "https://www.ualberta.ca;https://www.harvard.edu", "aff_unique_abbr": "UAlberta;Harvard", "aff_campus_unique_index": "0", "aff_campus_unique": "Edmonton;", "aff_country_unique_index": "0+1", "aff_country_unique": "Canada;United States" }, { "title": "Differentially Private Submodular Maximization: Data Summarization in Disguise", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/637", "id": "637", "author_site": "Marko Mitrovic, Mark Bun, Andreas Krause, Amin Karbasi", "author": "Marko Mitrovic; Mark Bun; Andreas Krause; Amin Karbasi", "abstract": "Many data summarization applications are captured by the general framework of submodular maximization. As a consequence, a wide range of efficient approximation algorithms have been developed. However, when such applications involve sensitive data about individuals, their privacy concerns are not automatically addressed. To remedy this problem, we propose a general and systematic study of differentially private submodular maximization. We present privacy-preserving algorithms for both monotone and non-monotone submodular maximization under cardinality, matroid, and p-extendible system constraints, with guarantees that are competitive with optimal. Along the way, we analyze a new algorithm for non-monotone submodular maximization, which is the first (even non-privately) to achieve a constant approximation ratio while running in linear time. We additionally provide two concrete experiments to validate the efficacy of these algorithms.", "bibtex": "@InProceedings{pmlr-v70-mitrovic17a,\n title = \t {Differentially Private Submodular Maximization: Data Summarization in Disguise},\n author = {Marko Mitrovic and Mark Bun and Andreas Krause and Amin Karbasi},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2478--2487},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/mitrovic17a/mitrovic17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/mitrovic17a.html},\n abstract = \t {Many data summarization applications are captured by the general framework of submodular maximization. As a consequence, a wide range of efficient approximation algorithms have been developed. 
However, when such applications involve sensitive data about individuals, their privacy concerns are not automatically addressed. To remedy this problem, we propose a general and systematic study of differentially private submodular maximization. We present privacy-preserving algorithms for both monotone and non-monotone submodular maximization under cardinality, matroid, and p-extendible system constraints, with guarantees that are competitive with optimal. Along the way, we analyze a new algorithm for non-monotone submodular maximization, which is the first (even non-privately) to achieve a constant approximation ratio while running in linear time. We additionally provide two concrete experiments to validate the efficacy of these algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v70/mitrovic17a/mitrovic17a.pdf", "supp": "", "pdf_size": 4564873, "gs_citation": 55, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5373536662496584692&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Yale University; Princeton University; ETH Zurich; Yale University", "aff_domain": "yale.edu; ; ; ", "email": "yale.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/mitrovic17a.html", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "Yale University;Princeton University;ETH Zurich", "aff_unique_dep": ";;", "aff_unique_url": "https://www.yale.edu;https://www.princeton.edu;https://www.ethz.ch", "aff_unique_abbr": "Yale;Princeton;ETHZ", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;1;0", "aff_country_unique": "United States;Switzerland" }, { "title": "Discovering Discrete Latent Topics with Neural Variational Inference", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/776", "id": "776", "author_site": "Yishu Miao, Edward Grefenstette, Phil Blunsom", "author": "Yishu Miao; Edward Grefenstette; Phil Blunsom", "abstract": "Topic models have been widely explored as probabilistic generative models of documents. Traditional inference methods have sought closed-form derivations for updating the models, however as the expressiveness of these models grows, so does the difficulty of performing fast and accurate inference over their parameters. This paper presents alternative neural approaches to topic modelling by providing parameterisable distributions over topics which permit training by backpropagation in the framework of neural variational inference. In addition, with the help of a stick-breaking construction, we propose a recurrent network that is able to discover a notionally unbounded number of topics, analogous to Bayesian non-parametric topic models. 
Experimental results on the MXM Song Lyrics, 20NewsGroups and Reuters News datasets demonstrate the effectiveness and efficiency of these neural topic models.", "bibtex": "@InProceedings{pmlr-v70-miao17a,\n title = \t {Discovering Discrete Latent Topics with Neural Variational Inference},\n author = {Yishu Miao and Edward Grefenstette and Phil Blunsom},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2410--2419},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/miao17a/miao17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/miao17a.html},\n abstract = \t {Topic models have been widely explored as probabilistic generative models of documents. Traditional inference methods have sought closed-form derivations for updating the models, however as the expressiveness of these models grows, so does the difficulty of performing fast and accurate inference over their parameters. This paper presents alternative neural approaches to topic modelling by providing parameterisable distributions over topics which permit training by backpropagation in the framework of neural variational inference. In addition, with the help of a stick-breaking construction, we propose a recurrent network that is able to discover a notionally unbounded number of topics, analogous to Bayesian non-parametric topic models. Experimental results on the MXM Song Lyrics, 20NewsGroups and Reuters News datasets demonstrate the effectiveness and efficiency of these neural topic models.}\n}", "pdf": "http://proceedings.mlr.press/v70/miao17a/miao17a.pdf", "supp": "", "pdf_size": 2791613, "gs_citation": 399, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6929175424516039349&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "University of Oxford; DeepMind; University of Oxford + DeepMind", "aff_domain": "cs.ox.ac.uk; ; ", "email": "cs.ox.ac.uk; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/miao17a.html", "aff_unique_index": "0;1;0+1", "aff_unique_norm": "University of Oxford;DeepMind", "aff_unique_dep": ";", "aff_unique_url": "https://www.ox.ac.uk;https://deepmind.com", "aff_unique_abbr": "Oxford;DeepMind", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0+0", "aff_country_unique": "United Kingdom" }, { "title": "Dissipativity Theory for Nesterov\u2019s Accelerated Method", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/891", "id": "891", "author_site": "Bin Hu, Laurent Lessard", "author": "Bin Hu; Laurent Lessard", "abstract": "In this paper, we adapt the control theoretic concept of dissipativity theory to provide a natural understanding of Nesterov\u2019s accelerated method. Our theory ties rigorous convergence rate analysis to the physically intuitive notion of energy dissipation. Moreover, dissipativity allows one to efficiently construct Lyapunov functions (either numerically or analytically) by solving a small semidefinite program. Using novel supply rate functions, we show how to recover known rate bounds for Nesterov\u2019s method and we generalize the approach to certify both linear and sublinear rates in a variety of settings. 
Finally, we link the continuous-time version of dissipativity to recent works on algorithm analysis that use discretizations of ordinary differential equations.", "bibtex": "@InProceedings{pmlr-v70-hu17a,\n title = \t {Dissipativity Theory for {N}esterov's Accelerated Method},\n author = {Bin Hu and Laurent Lessard},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1549--1557},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/hu17a/hu17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/hu17a.html},\n abstract = \t {In this paper, we adapt the control theoretic concept of dissipativity theory to provide a natural understanding of Nesterov\u2019s accelerated method. Our theory ties rigorous convergence rate analysis to the physically intuitive notion of energy dissipation. Moreover, dissipativity allows one to efficiently construct Lyapunov functions (either numerically or analytically) by solving a small semidefinite program. Using novel supply rate functions, we show how to recover known rate bounds for Nesterov\u2019s method and we generalize the approach to certify both linear and sublinear rates in a variety of settings. Finally, we link the continuous-time version of dissipativity to recent works on algorithm analysis that use discretizations of ordinary differential equations.}\n}", "pdf": "http://proceedings.mlr.press/v70/hu17a/hu17a.pdf", "supp": "", "pdf_size": 240863, "gs_citation": 150, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14761061357485715998&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "University of Wisconsin\u2013Madison; University of Wisconsin\u2013Madison", "aff_domain": "wisc.edu; ", "email": "wisc.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/hu17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Wisconsin\u2013Madison", "aff_unique_dep": "", "aff_unique_url": "https://www.wisc.edu", "aff_unique_abbr": "UW\u2013Madison", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Madison", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Distributed Batch Gaussian Process Optimization", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/689", "id": "689", "author_site": "Erik Daxberger, Bryan Kian Hsiang Low", "author": "Erik A. Daxberger; Bryan Kian Hsiang Low", "abstract": "This paper presents a novel distributed batch Gaussian process upper confidence bound (DB-GP-UCB) algorithm for performing batch Bayesian optimization (BO) of highly complex, costly-to-evaluate black-box objective functions. In contrast to existing batch BO algorithms, DB-GP-UCB can jointly optimize a batch of inputs (as opposed to selecting the inputs of a batch one at a time) while still preserving scalability in the batch size. To realize this, we generalize GP-UCB to a new batch variant amenable to a Markov approximation, which can then be naturally formulated as a multi-agent distributed constraint optimization problem in order to fully exploit the efficiency of its state-of-the-art solvers for achieving linear time in the batch size. 
Our DB-GP-UCB algorithm offers practitioners the flexibility to trade off between the approximation quality and time efficiency by varying the Markov order. We provide a theoretical guarantee for the convergence rate of DB-GP-UCB via bounds on its cumulative regret. Empirical evaluation on synthetic benchmark objective functions and a real-world optimization problem shows that DB-GP-UCB outperforms the state-of-the-art batch BO algorithms.", "bibtex": "@InProceedings{pmlr-v70-daxberger17a,\n title = \t {Distributed Batch {G}aussian Process Optimization},\n author = {Erik A. Daxberger and Bryan Kian Hsiang Low},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {951--960},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/daxberger17a/daxberger17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/daxberger17a.html},\n abstract = \t {This paper presents a novel distributed batch Gaussian process upper confidence bound (DB-GP-UCB) algorithm for performing batch Bayesian optimization (BO) of highly complex, costly-to-evaluate black-box objective functions. In contrast to existing batch BO algorithms, DB-GP-UCB can jointly optimize a batch of inputs (as opposed to selecting the inputs of a batch one at a time) while still preserving scalability in the batch size. To realize this, we generalize GP-UCB to a new batch variant amenable to a Markov approximation, which can then be naturally formulated as a multi-agent distributed constraint optimization problem in order to fully exploit the efficiency of its state-of-the-art solvers for achieving linear time in the batch size. Our DB-GP-UCB algorithm offers practitioners the flexibility to trade off between the approximation quality and time efficiency by varying the Markov order. We provide a theoretical guarantee for the convergence rate of DB-GP-UCB via bounds on its cumulative regret. 
Empirical evaluation on synthetic benchmark objective functions and a real-world optimization problem shows that DB-GP-UCB outperforms the state-of-the-art batch BO algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v70/daxberger17a/daxberger17a.pdf", "supp": "", "pdf_size": 619271, "gs_citation": 60, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9308345920373936272&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Ludwig-Maximilians-Universit\u00e4t, Munich, Germany + National University of Singapore; Department of Computer Science, National University of Singapore, Republic of Singapore", "aff_domain": "lmu.de;comp.nus.edu.sg", "email": "lmu.de;comp.nus.edu.sg", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/daxberger17a.html", "aff_unique_index": "0+1;1", "aff_unique_norm": "Ludwig-Maximilians-Universit\u00e4t;National University of Singapore", "aff_unique_dep": ";", "aff_unique_url": "https://www.lmu.de;https://www.nus.edu.sg", "aff_unique_abbr": "LMU;NUS", "aff_campus_unique_index": "0", "aff_campus_unique": "Munich;", "aff_country_unique_index": "0+1;1", "aff_country_unique": "Germany;Singapore" }, { "title": "Distributed Mean Estimation with Limited Communication", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/714", "id": "714", "author_site": "Ananda Theertha Suresh, Felix Xinnan Yu, Sanjiv Kumar, Brendan McMahan", "author": "Ananda Theertha Suresh; Felix X. Yu; Sanjiv Kumar; H. Brendan McMahan", "abstract": "Motivated by the need for distributed learning and optimization algorithms with low communication cost, we study communication efficient algorithms for distributed mean estimation. Unlike previous works, we make no probabilistic assumptions on the data. We first show that for $d$ dimensional data with $n$ clients, a naive stochastic rounding approach yields a mean squared error (MSE) of $\\Theta(d/n)$ and uses a constant number of bits per dimension per client. We then extend this naive algorithm in two ways: we show that applying a structured random rotation before quantization reduces the error to $\\mathcal{O}((\\log d)/n)$ and a better coding strategy further reduces the error to $\\mathcal{O}(1/n)$. We also show that the latter coding strategy is optimal up to a constant in the minimax sense i.e., it achieves the best MSE for a given communication cost. We finally demonstrate the practicality of our algorithms by applying them to distributed Lloyd\u2019s algorithm for k-means and power iteration for PCA.", "bibtex": "@InProceedings{pmlr-v70-suresh17a,\n title = \t {Distributed Mean Estimation with Limited Communication},\n author = {Ananda Theertha Suresh and Felix X. Yu and Sanjiv Kumar and H. Brendan McMahan},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3329--3337},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/suresh17a/suresh17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/suresh17a.html},\n abstract = \t {Motivated by the need for distributed learning and optimization algorithms with low communication cost, we study communication efficient algorithms for distributed mean estimation. Unlike previous works, we make no probabilistic assumptions on the data. 
We first show that for $d$ dimensional data with $n$ clients, a naive stochastic rounding approach yields a mean squared error (MSE) of $\\Theta(d/n)$ and uses a constant number of bits per dimension per client. We then extend this naive algorithm in two ways: we show that applying a structured random rotation before quantization reduces the error to $\\mathcal{O}((\\log d)/n)$ and a better coding strategy further reduces the error to $\\mathcal{O}(1/n)$. We also show that the latter coding strategy is optimal up to a constant in the minimax sense i.e., it achieves the best MSE for a given communication cost. We finally demonstrate the practicality of our algorithms by applying them to distributed Lloyd\u2019s algorithm for k-means and power iteration for PCA.}\n}", "pdf": "http://proceedings.mlr.press/v70/suresh17a/suresh17a.pdf", "supp": "", "pdf_size": 514057, "gs_citation": 424, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11668081682021556888&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Google Research, New York, NY, USA; Google Research, New York, NY, USA; Google Research, New York, NY, USA; Google Research, Seattle, WA, USA", "aff_domain": "google.com; ; ; ", "email": "google.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/suresh17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Research", "aff_unique_url": "https://research.google", "aff_unique_abbr": "Google Research", "aff_campus_unique_index": "0;0;0;1", "aff_campus_unique": "New York;Seattle", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Distributed and Provably Good Seedings for k-Means in Constant Rounds", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/472", "id": "472", "author_site": "Olivier Bachem, Mario Lucic, Andreas Krause", "author": "Olivier Bachem; Mario Lucic; Andreas Krause", "abstract": "The k-Means++ algorithm is the state of the art algorithm to solve k-Means clustering problems as the computed clusterings are O(log k) competitive in expectation. However, its seeding step requires k inherently sequential passes through the full data set making it hard to scale to massive data sets. The standard remedy is to use the k-Means|| algorithm which reduces the number of sequential rounds and is thus suitable for a distributed setting. In this paper, we provide a novel analysis of the k-Means|| algorithm that bounds the expected solution quality for any number of rounds and oversampling factors greater than k, the two parameters one needs to choose in practice. 
In particular, we show that k-Means|| provides", "bibtex": "@InProceedings{pmlr-v70-bachem17b,\n title = \t {Distributed and Provably Good Seedings for k-Means in Constant Rounds},\n author = {Olivier Bachem and Mario Lucic and Andreas Krause},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {292--300},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/bachem17b/bachem17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/bachem17b.html},\n abstract = \t {The k-Means++ algorithm is the state of the art algorithm to solve k-Means clustering problems as the computed clusterings are O(log k) competitive in expectation. However, its seeding step requires k inherently sequential passes through the full data set making it hard to scale to massive data sets. The standard remedy is to use the k-Means|| algorithm which reduces the number of sequential rounds and is thus suitable for a distributed setting. In this paper, we provide a novel analysis of the k-Means|| algorithm that bounds the expected solution quality for any number of rounds and oversampling factors greater than k, the two parameters one needs to choose in practice. In particular, we show that k-Means|| provides", "pdf": "http://proceedings.mlr.press/v70/bachem17b/bachem17b.pdf", "supp": "", "pdf_size": 307744, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=715181370893175967&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Department of Computer Science, ETH Zurich; Department of Computer Science, ETH Zurich; Department of Computer Science, ETH Zurich", "aff_domain": "inf.ethz.ch; ; ", "email": "inf.ethz.ch; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/bachem17b.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETHZ", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Switzerland" }, { "title": "Doubly Accelerated Methods for Faster CCA and Generalized Eigendecomposition", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/485", "id": "485", "author_site": "Zeyuan Allen-Zhu, Yuanzhi Li", "author": "Zeyuan Allen-Zhu; Yuanzhi Li", "abstract": "We study k-GenEV, the problem of finding the top k generalized eigenvectors, and k-CCA, the problem of finding the top k vectors in canonical-correlation analysis. We propose algorithms LazyEV and LazyCCA to solve the two problems with running times linearly dependent on the input size and on k. 
Furthermore, our algorithms are", "bibtex": "@InProceedings{pmlr-v70-allen-zhu17b,\n title = \t {Doubly Accelerated Methods for Faster {CCA} and Generalized Eigendecomposition},\n author = {Zeyuan Allen-Zhu and Yuanzhi Li},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {98--106},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/allen-zhu17b/allen-zhu17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/allen-zhu17b.html},\n abstract = \t {We study k-GenEV, the problem of finding the top k generalized eigenvectors, and k-CCA, the problem of finding the top k vectors in canonical-correlation analysis. We propose algorithms LazyEV and LazyCCA to solve the two problems with running times linearly dependent on the input size and on k. Furthermore, our algorithms are", "pdf": "http://proceedings.mlr.press/v70/allen-zhu17b/allen-zhu17b.pdf", "supp": "", "pdf_size": 457600, "gs_citation": 57, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13978223214242839258&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Microsoft Research; Princeton University", "aff_domain": "csail.mit.edu;cs.princeton.edu", "email": "csail.mit.edu;cs.princeton.edu", "github": "", "project": "http://arxiv.org/abs/1607.06017", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/allen-zhu17b.html", "aff_unique_index": "0;1", "aff_unique_norm": "Microsoft;Princeton University", "aff_unique_dep": "Microsoft Research;", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://www.princeton.edu", "aff_unique_abbr": "MSR;Princeton", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Doubly Greedy Primal-Dual Coordinate Descent for Sparse Empirical Risk Minimization", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/864", "id": "864", "author_site": "Qi Lei, En-Hsu Yen, Chao-Yuan Wu, Inderjit Dhillon, Pradeep Ravikumar", "author": "Qi Lei; Ian En-Hsu Yen; Chao-yuan Wu; Inderjit S. Dhillon; Pradeep Ravikumar", "abstract": "We consider the popular problem of sparse empirical risk minimization with linear predictors and a large number of both features and observations. With a convex-concave saddle point objective reformulation, we propose a Doubly Greedy Primal-Dual Coordinate Descent algorithm that is able to exploit sparsity in both primal and dual variables. It enjoys a low cost per iteration and our theoretical analysis shows that it converges linearly with a good iteration complexity, provided that the set of primal variables is sparse. We then extend this algorithm further to leverage active sets. The resulting new algorithm is even faster, and experiments on large-scale Multi-class data sets show that our algorithm achieves up to 30 times speedup on several state-of-the-art optimization methods.", "bibtex": "@InProceedings{pmlr-v70-lei17b,\n title = \t {Doubly Greedy Primal-Dual Coordinate Descent for Sparse Empirical Risk Minimization},\n author = {Qi Lei and Ian En-Hsu Yen and Chao-yuan Wu and Inderjit S. 
Dhillon and Pradeep Ravikumar},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2034--2042},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/lei17b/lei17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/lei17b.html},\n abstract = \t {We consider the popular problem of sparse empirical risk minimization with linear predictors and a large number of both features and observations. With a convex-concave saddle point objective reformulation, we propose a Doubly Greedy Primal-Dual Coordinate Descent algorithm that is able to exploit sparsity in both primal and dual variables. It enjoys a low cost per iteration and our theoretical analysis shows that it converges linearly with a good iteration complexity, provided that the set of primal variables is sparse. We then extend this algorithm further to leverage active sets. The resulting new algorithm is even faster, and experiments on large-scale Multi-class data sets show that our algorithm achieves up to 30 times speedup on several state-of-the-art optimization methods.}\n}", "pdf": "http://proceedings.mlr.press/v70/lei17b/lei17b.pdf", "supp": "", "pdf_size": 1040567, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17949617717120395745&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of ICES, University of Texas, Austin; Department of CS, Carnegie Mellon University, Pittsburgh; Department of CS, University of Texas, Austin; Department of ICES, University of Texas, Austin + Amazon/A9, Palo Alto; Department of CS, Carnegie Mellon University, Pittsburgh", "aff_domain": "ices.utexas.edu;cs.cmu.edu; ;ices.utexas.edu;cs.cmu.edu", "email": "ices.utexas.edu;cs.cmu.edu; ;ices.utexas.edu;cs.cmu.edu", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/lei17b.html", "aff_unique_index": "0;1;0;0+2;1", "aff_unique_norm": "University of Texas at Austin;Carnegie Mellon University;Amazon", "aff_unique_dep": "Department of ICES;Department of CS;Amazon", "aff_unique_url": "https://www.utexas.edu;https://www.cmu.edu;https://www.amazon.com", "aff_unique_abbr": "UT Austin;CMU;Amazon", "aff_campus_unique_index": "0;1;0;0+2;1", "aff_campus_unique": "Austin;Pittsburgh;Palo Alto", "aff_country_unique_index": "0;0;0;0+0;0", "aff_country_unique": "United States" }, { "title": "Dropout Inference in Bayesian Neural Networks with Alpha-divergences", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/539", "id": "539", "author_site": "Yingzhen Li, Yarin Gal", "author": "Yingzhen Li; Yarin Gal", "abstract": "To obtain uncertainty estimates with real-world Bayesian deep learning models, practical inference approximations are needed. Dropout variational inference (VI) for example has been used for machine vision and medical applications, but VI can severely underestimates model uncertainty. Alpha-divergences are alternative divergences to VI\u2019s KL objective, which are able to avoid VI\u2019s uncertainty underestimation. But these are hard to use in practice: existing techniques can only use Gaussian approximating distributions, and require existing models to be changed radically, thus are of limited use for practitioners. 
We propose a re-parametrisation of the alpha-divergence objectives, deriving a simple inference technique which, together with dropout, can be easily implemented with existing models by simply changing the loss of the model. We demonstrate improved uncertainty estimates and accuracy compared to VI in dropout networks. We study our model\u2019s epistemic uncertainty far away from the data using adversarial images, showing that these can be distinguished from non-adversarial images by examining our model\u2019s uncertainty.", "bibtex": "@InProceedings{pmlr-v70-li17a,\n title = \t {Dropout Inference in {B}ayesian Neural Networks with Alpha-divergences},\n author = {Yingzhen Li and Yarin Gal},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2052--2061},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/li17a/li17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/li17a.html},\n abstract = \t {To obtain uncertainty estimates with real-world Bayesian deep learning models, practical inference approximations are needed. Dropout variational inference (VI) for example has been used for machine vision and medical applications, but VI can severely underestimates model uncertainty. Alpha-divergences are alternative divergences to VI\u2019s KL objective, which are able to avoid VI\u2019s uncertainty underestimation. But these are hard to use in practice: existing techniques can only use Gaussian approximating distributions, and require existing models to be changed radically, thus are of limited use for practitioners. We propose a re-parametrisation of the alpha-divergence objectives, deriving a simple inference technique which, together with dropout, can be easily implemented with existing models by simply changing the loss of the model. We demonstrate improved uncertainty estimates and accuracy compared to VI in dropout networks. 
We study our model\u2019s epistemic uncertainty far away from the data using adversarial images, showing that these can be distinguished from non-adversarial images by examining our model\u2019s uncertainty.}\n}", "pdf": "http://proceedings.mlr.press/v70/li17a/li17a.pdf", "supp": "", "pdf_size": 1105815, "gs_citation": 259, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=400434559794511102&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "University of Cambridge, UK+The Alan Turing Institute, UK; University of Cambridge, UK+The Alan Turing Institute, UK", "aff_domain": "cam.ac.uk; ", "email": "cam.ac.uk; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/li17a.html", "aff_unique_index": "0+1;0+1", "aff_unique_norm": "University of Cambridge;Alan Turing Institute", "aff_unique_dep": ";", "aff_unique_url": "https://www.cam.ac.uk;https://www.turing.ac.uk", "aff_unique_abbr": "Cambridge;ATI", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cambridge;", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "United Kingdom" }, { "title": "Dual Iterative Hard Thresholding: From Non-convex Sparse Minimization to Non-smooth Concave Maximization", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/525", "id": "525", "author_site": "Bo Liu, Xiaotong Yuan, Lezi Wang, Qingshan Liu, Dimitris Metaxas", "author": "Bo Liu; Xiao-Tong Yuan; Lezi Wang; Qingshan Liu; Dimitris N. Metaxas", "abstract": "Iterative Hard Thresholding (IHT) is a class of projected gradient descent methods for optimizing sparsity-constrained minimization models, with the best known efficiency and scalability in practice. As far as we know, the existing IHT-style methods are designed for sparse minimization in primal form. It remains open to explore duality theory and algorithms in such a non-convex and NP-hard setting. In this article, we bridge the gap by establishing a duality theory for sparsity-constrained minimization with $\\ell_2$-regularized objective and proposing an IHT-style algorithm for dual maximization. Our sparse duality theory provides a set of sufficient and necessary conditions under which the original NP-hard/non-convex problem can be equivalently solved in a dual space. The proposed dual IHT algorithm is a super-gradient method for maximizing the non-smooth dual objective. An interesting finding is that the sparse recovery performance of dual IHT is invariant to the Restricted Isometry Property (RIP), which is required by all the existing primal IHT without sparsity relaxation. Moreover, a stochastic variant of dual IHT is proposed for large-scale stochastic optimization. Numerical results demonstrate that dual IHT algorithms can achieve more accurate model estimation given small number of training data and have higher computational efficiency than the state-of-the-art primal IHT-style algorithms.", "bibtex": "@InProceedings{pmlr-v70-liu17e,\n title = \t {Dual Iterative Hard Thresholding: From Non-convex Sparse Minimization to Non-smooth Concave Maximization},\n author = {Bo Liu and Xiao-Tong Yuan and Lezi Wang and Qingshan Liu and Dimitris N. 
Metaxas},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2179--2187},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/liu17e/liu17e.pdf},\n url = \t {https://proceedings.mlr.press/v70/liu17e.html},\n abstract = \t {Iterative Hard Thresholding (IHT) is a class of projected gradient descent methods for optimizing sparsity-constrained minimization models, with the best known efficiency and scalability in practice. As far as we know, the existing IHT-style methods are designed for sparse minimization in primal form. It remains open to explore duality theory and algorithms in such a non-convex and NP-hard setting. In this article, we bridge the gap by establishing a duality theory for sparsity-constrained minimization with $\\ell_2$-regularized objective and proposing an IHT-style algorithm for dual maximization. Our sparse duality theory provides a set of sufficient and necessary conditions under which the original NP-hard/non-convex problem can be equivalently solved in a dual space. The proposed dual IHT algorithm is a super-gradient method for maximizing the non-smooth dual objective. An interesting finding is that the sparse recovery performance of dual IHT is invariant to the Restricted Isometry Property (RIP), which is required by all the existing primal IHT without sparsity relaxation. Moreover, a stochastic variant of dual IHT is proposed for large-scale stochastic optimization. Numerical results demonstrate that dual IHT algorithms can achieve more accurate model estimation given small number of training data and have higher computational efficiency than the state-of-the-art primal IHT-style algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v70/liu17e/liu17e.pdf", "supp": "", "pdf_size": 400462, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9829962547104001941&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of CS, Rutgers University, Piscataway, NJ, 08854, USA; B-DAT Lab, Nanjing University of Information Science & Technology, Nanjing, Jiangsu, 210044, China; Department of CS, Rutgers University, Piscataway, NJ, 08854, USA; B-DAT Lab, Nanjing University of Information Science & Technology, Nanjing, Jiangsu, 210044, China; Department of CS, Rutgers University, Piscataway, NJ, 08854, USA", "aff_domain": "cs.rutgers.edu; ; ; ; ", "email": "cs.rutgers.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/liu17e.html", "aff_unique_index": "0;1;0;1;0", "aff_unique_norm": "Rutgers University;Nanjing University of Information Science & Technology", "aff_unique_dep": "Department of CS;B-DAT Lab", "aff_unique_url": "https://www.rutgers.edu;", "aff_unique_abbr": "Rutgers;", "aff_campus_unique_index": "0;1;0;1;0", "aff_campus_unique": "Piscataway;Nanjing", "aff_country_unique_index": "0;1;0;1;0", "aff_country_unique": "United States;China" }, { "title": "Dual Supervised Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/763", "id": "763", "author_site": "Yingce Xia, Tao Qin, Wei Chen, Jiang Bian, Nenghai Yu, Tie-Yan Liu", "author": "Yingce Xia; Tao Qin; Wei Chen; Jiang Bian; Nenghai Yu; Tie-Yan Liu", "abstract": "Many supervised learning tasks are emerged in dual forms, e.g., English-to-French 
translation vs. French-to-English translation, speech recognition vs. text to speech, and image classification vs. image generation. Two dual tasks have intrinsic connections with each other due to the probabilistic correlation between their models. This connection is, however, not effectively utilized today, since people usually train the models of two dual tasks separately and independently. In this work, we propose training the models of two dual tasks simultaneously, and explicitly exploiting the probabilistic correlation between them to regularize the training process. For ease of reference, we call the proposed approach dual supervised learning. We demonstrate that dual supervised learning can improve the practical performances of both tasks, for various applications including machine translation, image processing, and sentiment analysis.", "bibtex": "@InProceedings{pmlr-v70-xia17a,\n title = \t {Dual Supervised Learning},\n author = {Yingce Xia and Tao Qin and Wei Chen and Jiang Bian and Nenghai Yu and Tie-Yan Liu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3789--3798},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/xia17a/xia17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/xia17a.html},\n abstract = \t {Many supervised learning tasks are emerged in dual forms, e.g., English-to-French translation vs. French-to-English translation, speech recognition vs. text to speech, and image classification vs. image generation. Two dual tasks have intrinsic connections with each other due to the probabilistic correlation between their models. This connection is, however, not effectively utilized today, since people usually train the models of two dual tasks separately and independently. In this work, we propose training the models of two dual tasks simultaneously, and explicitly exploiting the probabilistic correlation between them to regularize the training process. For ease of reference, we call the proposed approach dual supervised learning. 
We demonstrate that dual supervised learning can improve the practical performances of both tasks, for various applications including machine translation, image processing, and sentiment analysis.}\n}", "pdf": "http://proceedings.mlr.press/v70/xia17a/xia17a.pdf", "supp": "", "pdf_size": 1084838, "gs_citation": 184, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17907972833117899731&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "School of Information Science and Technology, University of Science and Technology of China, Hefei, Anhui, China; Microsoft Research, Beijing, China; Microsoft Research, Beijing, China; Microsoft Research, Beijing, China; School of Information Science and Technology, University of Science and Technology of China, Hefei, Anhui, China; Microsoft Research, Beijing, China", "aff_domain": "ustc.edu.cn;microsoft.com;microsoft.com;microsoft.com;ustc.edu.cn;microsoft.com", "email": "ustc.edu.cn;microsoft.com;microsoft.com;microsoft.com;ustc.edu.cn;microsoft.com", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/xia17a.html", "aff_unique_index": "0;1;1;1;0;1", "aff_unique_norm": "University of Science and Technology of China;Microsoft", "aff_unique_dep": "School of Information Science and Technology;Microsoft Research", "aff_unique_url": "http://www.ustc.edu.cn;https://www.microsoft.com/en-us/research/group/microsoft-research-asia", "aff_unique_abbr": "USTC;MSR", "aff_campus_unique_index": "0;1;1;1;0;1", "aff_campus_unique": "Hefei;Beijing", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "China" }, { "title": "Dueling Bandits with Weak Regret", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/587", "id": "587", "author_site": "Bangrui Chen, Peter I Frazier", "author": "Bangrui Chen; Peter I. Frazier", "abstract": "We consider online content recommendation with implicit feedback through pairwise comparisons, formalized as the so-called dueling bandit problem. We study the dueling bandit problem in the Condorcet winner setting, and consider two notions of regret: the more well-studied strong regret, which is 0 only when both arms pulled are the Condorcet winner; and the less well-studied weak regret, which is 0 if either arm pulled is the Condorcet winner. We propose a new algorithm for this problem, Winner Stays (WS), with variations for each kind of regret: WS for weak regret (WS-W) has expected cumulative weak regret that is $O(N^2)$, and $O(N\\log(N))$ if arms have a total order; WS for strong regret (WS-S) has expected cumulative strong regret of $O(N^2 + N \\log(T))$, and $O(N\\log(N)+N\\log(T))$ if arms have a total order. WS-W is the first dueling bandit algorithm with weak regret that is constant in time. WS is simple to compute, even for problems with many arms, and we demonstrate through numerical experiments on simulated and real data that WS has significantly smaller regret than existing algorithms in both the weak- and strong-regret settings.", "bibtex": "@InProceedings{pmlr-v70-chen17c,\n title = \t {Dueling Bandits with Weak Regret},\n author = {Bangrui Chen and Peter I. 
Frazier},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {731--739},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/chen17c/chen17c.pdf},\n url = \t {https://proceedings.mlr.press/v70/chen17c.html},\n abstract = \t {We consider online content recommendation with implicit feedback through pairwise comparisons, formalized as the so-called dueling bandit problem. We study the dueling bandit problem in the Condorcet winner setting, and consider two notions of regret: the more well-studied strong regret, which is 0 only when both arms pulled are the Condorcet winner; and the less well-studied weak regret, which is 0 if either arm pulled is the Condorcet winner. We propose a new algorithm for this problem, Winner Stays (WS), with variations for each kind of regret: WS for weak regret (WS-W) has expected cumulative weak regret that is $O(N^2)$, and $O(N\\log(N))$ if arms have a total order; WS for strong regret (WS-S) has expected cumulative strong regret of $O(N^2 + N \\log(T))$, and $O(N\\log(N)+N\\log(T))$ if arms have a total order. WS-W is the first dueling bandit algorithm with weak regret that is constant in time. WS is simple to compute, even for problems with many arms, and we demonstrate through numerical experiments on simulated and real data that WS has significantly smaller regret than existing algorithms in both the weak- and strong-regret settings.}\n}", "pdf": "http://proceedings.mlr.press/v70/chen17c/chen17c.pdf", "supp": "", "pdf_size": 1203599, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14663851389266565529&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Cornell University; Cornell University", "aff_domain": "cornell.edu;cornell.edu", "email": "cornell.edu;cornell.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/chen17c.html", "aff_unique_index": "0;0", "aff_unique_norm": "Cornell University", "aff_unique_dep": "", "aff_unique_url": "https://www.cornell.edu", "aff_unique_abbr": "Cornell", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Dynamic Word Embeddings", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/497", "id": "497", "author_site": "Robert Bamler, Stephan Mandt", "author": "Robert Bamler; Stephan Mandt", "abstract": "We present a probabilistic language model for time-stamped text data which tracks the semantic evolution of individual words over time. The model represents words and contexts by latent trajectories in an embedding space. At each moment in time, the embedding vectors are inferred from a probabilistic version of word2vec [Mikolov et al., 2013]. These embedding vectors are connected in time through a latent diffusion process. We describe two scalable variational inference algorithms\u2013skip-gram smoothing and skip-gram filtering\u2013that allow us to train the model jointly over all times; thus learning on all data while simultaneously allowing word and context vectors to drift. 
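The "latent diffusion process" mentioned in the Dynamic Word Embeddings entry above amounts to a Gaussian random-walk prior tying each word's embedding across time slices. Below is a minimal sketch under that reading (names, shapes, and the noise scale are illustrative, not the paper's code): the prior penalises large step-to-step changes, so trajectories are encouraged to drift slowly.

```python
import numpy as np

def diffusion_neg_log_prior(U, sigma=0.1):
    """U: (T, d) trajectory of one word's embedding over T time steps."""
    diffs = np.diff(U, axis=0)                    # (T-1, d) step-to-step changes
    return 0.5 * np.sum(diffs ** 2) / sigma ** 2  # up to an additive constant

rng = np.random.default_rng(0)
smooth = np.cumsum(rng.normal(scale=0.01, size=(20, 5)), axis=0)  # slow drift
jumpy = rng.normal(size=(20, 5))                                  # no temporal tie
print("slow drift :", diffusion_neg_log_prior(smooth))
print("independent:", diffusion_neg_log_prior(jumpy))  # far larger penalty
```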
Experimental results on three different corpora demonstrate that our dynamic model infers word embedding trajectories that are more interpretable and lead to higher predictive likelihoods than competing methods that are based on static models trained separately on time slices.", "bibtex": "@InProceedings{pmlr-v70-bamler17a,\n title = \t {Dynamic Word Embeddings},\n author = {Robert Bamler and Stephan Mandt},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {380--389},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/bamler17a/bamler17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/bamler17a.html},\n abstract = \t {We present a probabilistic language model for time-stamped text data which tracks the semantic evolution of individual words over time. The model represents words and contexts by latent trajectories in an embedding space. At each moment in time, the embedding vectors are inferred from a probabilistic version of word2vec [Mikolov et al., 2013]. These embedding vectors are connected in time through a latent diffusion process. We describe two scalable variational inference algorithms\u2013skip-gram smoothing and skip-gram filtering\u2013that allow us to train the model jointly over all times; thus learning on all data while simultaneously allowing word and context vectors to drift. Experimental results on three different corpora demonstrate that our dynamic model infers word embedding trajectories that are more interpretable and lead to higher predictive likelihoods than competing methods that are based on static models trained separately on time slices.}\n}", "pdf": "http://proceedings.mlr.press/v70/bamler17a/bamler17a.pdf", "supp": "", "pdf_size": 1644995, "gs_citation": 486, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15579482018491697939&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Disney Research; Disney Research", "aff_domain": "disneyresearch.com;disneyresearch.com", "email": "disneyresearch.com;disneyresearch.com", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/bamler17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Disney Research", "aff_unique_dep": "", "aff_unique_url": "https://research.disney.com", "aff_unique_abbr": "Disney Research", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Efficient Distributed Learning with Sparsity", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/584", "id": "584", "author_site": "Jialei Wang, Mladen Kolar, Nati Srebro, Tong Zhang", "author": "Jialei Wang; Mladen Kolar; Nathan Srebro; Tong Zhang", "abstract": "We propose a novel, efficient approach for distributed sparse learning with observations randomly partitioned across machines. In each round of the proposed method, worker machines compute the gradient of the loss on local data and the master machine solves a shifted $\\ell_1$ regularized loss minimization problem. 
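The master step described in this entry ("a shifted $\ell_1$ regularized loss minimization problem") can be pictured as an ordinary lasso-style subproblem with an extra linear "shift" term built from aggregated worker gradients. The sketch below solves such a subproblem with plain ISTA (proximal gradient with soft-thresholding); it is a hedged illustration of the subproblem's shape, not the paper's exact solver, and all names are hypothetical.

```python
import numpy as np

def soft_threshold(v, t):
    return np.sign(v) * np.maximum(np.abs(v) - t, 0.0)

def shifted_l1_master_step(X, y, shift, lam=0.1, step=None, iters=200):
    """Minimise local least-squares loss + <shift, w> + lam*||w||_1 via ISTA."""
    n, d = X.shape
    if step is None:
        step = 1.0 / (np.linalg.norm(X, 2) ** 2 / n)   # 1 / Lipschitz constant
    w = np.zeros(d)
    for _ in range(iters):
        grad = X.T @ (X @ w - y) / n + shift           # smooth part + shift term
        w = soft_threshold(w - step * grad, step * lam)
    return w

rng = np.random.default_rng(1)
X, y = rng.normal(size=(50, 10)), rng.normal(size=50)
shift = rng.normal(scale=0.01, size=10)   # stands in for the aggregated-gradient correction
print(shifted_l1_master_step(X, y, shift).round(3))
```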
After a number of communication rounds that scales only logarithmically with the number of machines, and independent of other parameters of the problem, the proposed approach provably matches the estimation error bound of centralized methods.", "bibtex": "@InProceedings{pmlr-v70-wang17f,\n title = \t {Efficient Distributed Learning with Sparsity},\n author = {Jialei Wang and Mladen Kolar and Nathan Srebro and Tong Zhang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3636--3645},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/wang17f/wang17f.pdf},\n url = \t {https://proceedings.mlr.press/v70/wang17f.html},\n abstract = \t {We propose a novel, efficient approach for distributed sparse learning with observations randomly partitioned across machines. In each round of the proposed method, worker machines compute the gradient of the loss on local data and the master machine solves a shifted $\\ell_1$ regularized loss minimization problem. After a number of communication rounds that scales only logarithmically with the number of machines, and independent of other parameters of the problem, the proposed approach provably matches the estimation error bound of centralized methods.}\n}", "pdf": "http://proceedings.mlr.press/v70/wang17f/wang17f.pdf", "supp": "", "pdf_size": 471668, "gs_citation": 196, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5354408309594215367&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "University of Chicago, USA; University of Chicago, USA; Toyota Technological Insti-tute at Chicago, USA; Tencent AI Lab, China", "aff_domain": "uchicago.edu;chicagobooth.edu;ttic.edu;tongzhang-ml.org", "email": "uchicago.edu;chicagobooth.edu;ttic.edu;tongzhang-ml.org", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/wang17f.html", "aff_unique_index": "0;0;1;2", "aff_unique_norm": "University of Chicago;Toyota Technological Institute at Chicago;Tencent", "aff_unique_dep": ";;Tencent AI Lab", "aff_unique_url": "https://www.uchicago.edu;https://www.tti-chicago.org;https://ai.tencent.com", "aff_unique_abbr": "UChicago;TTI Chicago;Tencent AI Lab", "aff_campus_unique_index": "1", "aff_campus_unique": ";Chicago", "aff_country_unique_index": "0;0;0;1", "aff_country_unique": "United States;China" }, { "title": "Efficient Nonmyopic Active Search", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/553", "id": "553", "author_site": "Shali Jiang, Luiz Gustavo Malkomes, Geoff Converse, Alyssa Shofner, Benjamin Moseley, Roman Garnett", "author": "Shali Jiang; Gustavo Malkomes; Geoff Converse; Alyssa Shofner; Benjamin Moseley; Roman Garnett", "abstract": "Active search is an active learning setting with the goal of identifying as many members of a given class as possible under a labeling budget. In this work, we first establish a theoretical hardness of active search, proving that no polynomial-time policy can achieve a constant factor approximation ratio with respect to the expected utility of the optimal policy. We also propose a novel, computationally efficient active search policy achieving exceptional performance on several real-world tasks. Our policy is nonmyopic, always considering the entire remaining search budget. 
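As a rough illustration of what "nonmyopic, always considering the entire remaining search budget" can mean in active search, here is a toy scoring rule: the probability that the queried point is a hit plus the expected hits a greedy use of the remaining budget could still collect. This is only a hedged caricature of the idea, not the policy proposed in the paper, and every name is hypothetical.

```python
import numpy as np

def nonmyopic_score(p, budget):
    """Toy budget-aware score: immediate P(positive) + sum of the top remaining
    probabilities the rest of the budget could still collect."""
    scores = np.empty_like(p)
    for i in range(len(p)):
        rest = np.delete(p, i)
        future = np.sort(rest)[::-1][: budget - 1].sum() if budget > 1 else 0.0
        scores[i] = p[i] + future
    return scores

p = np.array([0.9, 0.6, 0.55, 0.1, 0.05])   # current posterior P(positive) per candidate
print(nonmyopic_score(p, budget=3))          # the highest-scoring candidate is queried next
```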
It also automatically and dynamically balances exploration and exploitation consistent with the remaining budget, without relying on a parameter to control this tradeoff. We conduct experiments on diverse datasets from several domains: drug discovery, materials science, and a citation network. Our efficient nonmyopic policy recovers significantly more valuable points with the same budget than several alternatives from the literature, including myopic approximations to the optimal policy.", "bibtex": "@InProceedings{pmlr-v70-jiang17d,\n title = \t {Efficient Nonmyopic Active Search},\n author = {Shali Jiang and Gustavo Malkomes and Geoff Converse and Alyssa Shofner and Benjamin Moseley and Roman Garnett},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1714--1723},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/jiang17d/jiang17d.pdf},\n url = \t {https://proceedings.mlr.press/v70/jiang17d.html},\n abstract = \t {Active search is an active learning setting with the goal of identifying as many members of a given class as possible under a labeling budget. In this work, we first establish a theoretical hardness of active search, proving that no polynomial-time policy can achieve a constant factor approximation ratio with respect to the expected utility of the optimal policy. We also propose a novel, computationally efficient active search policy achieving exceptional performance on several real-world tasks. Our policy is nonmyopic, always considering the entire remaining search budget. It also automatically and dynamically balances exploration and exploitation consistent with the remaining budget, without relying on a parameter to control this tradeoff. We conduct experiments on diverse datasets from several domains: drug discovery, materials science, and a citation network. Our efficient nonmyopic policy recovers significantly more valuable points with the same budget than several alternatives from the literature, including myopic approximations to the optimal policy.}\n}", "pdf": "http://proceedings.mlr.press/v70/jiang17d/jiang17d.pdf", "supp": "", "pdf_size": 394824, "gs_citation": 68, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4318694792816447423&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Washington University in St. Louis; Washington University in St. Louis; Simpson College; University of South Carolina; Washington University in St. Louis; Washington University in St. Louis", "aff_domain": "wustl.edu; ; ; ; ; ", "email": "wustl.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/jiang17d.html", "aff_unique_index": "0;0;1;2;0;0", "aff_unique_norm": "Washington University in St. Louis;Simpson College;University of South Carolina", "aff_unique_dep": ";;", "aff_unique_url": "https://wustl.edu;https://www.simpson.edu;https://www.sc.edu", "aff_unique_abbr": "WashU;Simpson;USC", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "St. 
Louis;", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Efficient Online Bandit Multiclass Learning with $\\tilde{O}(\\sqrt{T})$ Regret", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/557", "id": "557", "author_site": "Alina Beygelzimer, Francesco Orabona, Chicheng Zhang", "author": "Alina Beygelzimer; Francesco Orabona; Chicheng Zhang", "abstract": "We present an efficient second-order algorithm with $\\tilde{O}(1/\\eta \\sqrt{T})$ regret for the bandit online multiclass problem. The regret bound holds simultaneously with respect to a family of loss functions parameterized by $\\eta$, ranging from hinge loss ($\\eta=0$) to squared hinge loss ($\\eta=1$). This provides a solution to the open problem of (Abernethy, J. and Rakhlin, A. An efficient bandit algorithm for $\\sqrt{T}$-regret in online multiclass prediction? In COLT, 2009). We test our algorithm experimentally, showing that it performs favorably against earlier algorithms.", "bibtex": "@InProceedings{pmlr-v70-beygelzimer17a,\n title = \t {Efficient Online Bandit Multiclass Learning with $\\tilde{O}(\\sqrt{T})$ Regret},\n author = {Alina Beygelzimer and Francesco Orabona and Chicheng Zhang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {488--497},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/beygelzimer17a/beygelzimer17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/beygelzimer17a.html},\n abstract = \t {We present an efficient second-order algorithm with $\\tilde{O}(1/\\eta \\sqrt{T})$ regret for the bandit online multiclass problem. The regret bound holds simultaneously with respect to a family of loss functions parameterized by $\\eta$, ranging from hinge loss ($\\eta=0$) to squared hinge loss ($\\eta=1$). This provides a solution to the open problem of (Abernethy, J. and Rakhlin, A. An efficient bandit algorithm for $\\sqrt{T}$-regret in online multiclass prediction? In COLT, 2009). 
We test our algorithm experimentally, showing that it performs favorably against earlier algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v70/beygelzimer17a/beygelzimer17a.pdf", "supp": "", "pdf_size": 670655, "gs_citation": -1, "gs_cited_by_link": "", "gs_version_total": -1, "aff": "Yahoo Research, New York, NY; Stony Brook University, Stony Brook, NY; University of California, San Diego, La Jolla, CA", "aff_domain": "yahoo-inc.com;orabona.com;ucsd.edu", "email": "yahoo-inc.com;orabona.com;ucsd.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/beygelzimer17a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Yahoo Research;Stony Brook University;University of California, San Diego", "aff_unique_dep": ";;", "aff_unique_url": "https://research.yahoo.com;https://www.stonybrook.edu;https://www.ucsd.edu", "aff_unique_abbr": "Yahoo Res.;SBU;UCSD", "aff_campus_unique_index": "0;1;2", "aff_campus_unique": "New York;Stony Brook;La Jolla", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Efficient Orthogonal Parametrisation of Recurrent Neural Networks Using Householder Reflections", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/670", "id": "670", "author_site": "zakaria mhammedi, Andrew Hellicar, James Bailey, Ashfaqur Rahman", "author": "Zakaria Mhammedi; Andrew Hellicar; Ashfaqur Rahman; James Bailey", "abstract": "The problem of learning long-term dependencies in sequences using Recurrent Neural Networks (RNNs) is still a major challenge. Recent methods have been suggested to solve this problem by constraining the transition matrix to be unitary during training which ensures that its norm is equal to one and prevents exploding gradients. These methods either have limited expressiveness or scale poorly with the size of the network when compared with the simple RNN case, especially when using stochastic gradient descent with a small mini-batch size. Our contributions are as follows; we first show that constraining the transition matrix to be unitary is a special case of an orthogonal constraint. Then we present a new parametrisation of the transition matrix which allows efficient training of an RNN while ensuring that the matrix is always orthogonal. Our results show that the orthogonal constraint on the transition matrix applied through our parametrisation gives similar benefits to the unitary constraint, without the time complexity limitations.", "bibtex": "@InProceedings{pmlr-v70-mhammedi17a,\n title = \t {Efficient Orthogonal Parametrisation of Recurrent Neural Networks Using Householder Reflections},\n author = {Zakaria Mhammedi and Andrew Hellicar and Ashfaqur Rahman and James Bailey},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2401--2409},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/mhammedi17a/mhammedi17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/mhammedi17a.html},\n abstract = \t {The problem of learning long-term dependencies in sequences using Recurrent Neural Networks (RNNs) is still a major challenge. 
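The parametrisation idea in the Householder-reflections entry above is easy to verify numerically: each reflection H(u) = I - 2uu^T/||u||^2 is orthogonal, so any product of them is too, which keeps the transition matrix's norm at one by construction. The sketch below checks this with NumPy (names and sizes are illustrative only; it is not the authors' training code).

```python
import numpy as np

def householder(u):
    u = u / np.linalg.norm(u)
    return np.eye(len(u)) - 2.0 * np.outer(u, u)    # reflection: orthogonal, symmetric

def orthogonal_from_reflections(U):
    """U: (k, n) unconstrained vectors; the product of their Householder
    reflections is an exactly orthogonal n x n matrix."""
    W = np.eye(U.shape[1])
    for u in U:
        W = W @ householder(u)
    return W

rng = np.random.default_rng(0)
W = orthogonal_from_reflections(rng.normal(size=(4, 8)))   # 4 reflections in R^8
print(np.allclose(W @ W.T, np.eye(8)))                     # True: norm is preserved
```

Using k < n reflections trades expressiveness for speed, which is the knob the paper's parametrisation exposes.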
Recent methods have been suggested to solve this problem by constraining the transition matrix to be unitary during training which ensures that its norm is equal to one and prevents exploding gradients. These methods either have limited expressiveness or scale poorly with the size of the network when compared with the simple RNN case, especially when using stochastic gradient descent with a small mini-batch size. Our contributions are as follows; we first show that constraining the transition matrix to be unitary is a special case of an orthogonal constraint. Then we present a new parametrisation of the transition matrix which allows efficient training of an RNN while ensuring that the matrix is always orthogonal. Our results show that the orthogonal constraint on the transition matrix applied through our parametrisation gives similar benefits to the unitary constraint, without the time complexity limitations.}\n}", "pdf": "http://proceedings.mlr.press/v70/mhammedi17a/mhammedi17a.pdf", "supp": "", "pdf_size": 432591, "gs_citation": 177, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11682526623340051803&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "The University of Melbourne, Parkville, Australia+Data61, CSIRO, Australia; Data61, CSIRO, Australia; Data61, CSIRO, Australia; The University of Melbourne, Parkville, Australia", "aff_domain": "data61.csiro.au; ; ; ", "email": "data61.csiro.au; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/mhammedi17a.html", "aff_unique_index": "0+1;1;1;0", "aff_unique_norm": "University of Melbourne;CSIRO", "aff_unique_dep": ";Data61", "aff_unique_url": "https://www.unimelb.edu.au;https://www.csiro.au", "aff_unique_abbr": "UniMelb;CSIRO", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Parkville;", "aff_country_unique_index": "0+0;0;0;0", "aff_country_unique": "Australia" }, { "title": "Efficient Regret Minimization in Non-Convex Games", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/581", "id": "581", "author_site": "Elad Hazan, Karan Singh, Cyril Zhang", "author": "Elad Hazan; Karan Singh; Cyril Zhang", "abstract": "We consider regret minimization in repeated games with non-convex loss functions. Minimizing the standard notion of regret is computationally intractable. Thus, we define a natural notion of regret which permits efficient optimization and generalizes offline guarantees for convergence to an approximate local optimum. We give gradient-based methods that achieve optimal regret, which in turn guarantee convergence to equilibrium in this framework.", "bibtex": "@InProceedings{pmlr-v70-hazan17a,\n title = \t {Efficient Regret Minimization in Non-Convex Games},\n author = {Elad Hazan and Karan Singh and Cyril Zhang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1433--1441},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/hazan17a/hazan17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/hazan17a.html},\n abstract = \t {We consider regret minimization in repeated games with non-convex loss functions. Minimizing the standard notion of regret is computationally intractable. 
Thus, we define a natural notion of regret which permits efficient optimization and generalizes offline guarantees for convergence to an approximate local optimum. We give gradient-based methods that achieve optimal regret, which in turn guarantee convergence to equilibrium in this framework.}\n}", "pdf": "http://proceedings.mlr.press/v70/hazan17a/hazan17a.pdf", "supp": "", "pdf_size": 475260, "gs_citation": 118, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=860080838858354614&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Computer Science, Princeton University; Computer Science, Princeton University; Computer Science, Princeton University", "aff_domain": "princeton.edu;princeton.edu;princeton.edu", "email": "princeton.edu;princeton.edu;princeton.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/hazan17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Princeton University", "aff_unique_dep": "Computer Science", "aff_unique_url": "https://www.princeton.edu", "aff_unique_abbr": "Princeton", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Princeton", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Efficient softmax approximation for GPUs", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/762", "id": "762", "author_site": "Edouard Grave, Armand Joulin, Moustapha Cisse, David Grangier, Herve Jegou", "author": "Grave; Armand Joulin; Moustapha Ciss\u00e9; David Grangier; Herv\u00e9 J\u00e9gou", "abstract": "We propose an approximate strategy to efficiently train neural network based language models over very large vocabularies. Our approach, called adaptive softmax, circumvents the linear dependency on the vocabulary size by exploiting the unbalanced word distribution to form clusters that explicitly minimize the expectation of computation time. Our approach further reduces the computational cost by exploiting the specificities of modern architectures and matrix-matrix vector operations, making it particularly suited for graphical processing units. Our experiments carried out on standard benchmarks, such as EuroParl and One Billion Word, show that our approach brings a large gain in efficiency over standard approximations while achieving an accuracy close to that of the full softmax. The code of our method is available at https://github.com/facebookresearch/adaptive-softmax.", "bibtex": "@InProceedings{pmlr-v70-grave17a,\n title = \t {Efficient softmax approximation for {GPU}s},\n author = {{\\'E}douard Grave and Armand Joulin and Moustapha Ciss{\\'e} and David Grangier and Herv{\\'e} J{\\'e}gou},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1302--1310},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/grave17a/grave17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/grave17a.html},\n abstract = \t {We propose an approximate strategy to efficiently train neural network based language models over very large vocabularies. Our approach, called adaptive softmax, circumvents the linear dependency on the vocabulary size by exploiting the unbalanced word distribution to form clusters that explicitly minimize the expectation of computation time. 
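The clustering idea behind adaptive softmax can be sketched with a single "tail" cluster: a small head softmax scores the frequent words plus one extra cluster slot, and rare words share that slot's probability mass through a second softmax. The snippet below is a simplified two-level illustration with made-up dimensions, not the released implementation linked in this entry (which handles several clusters and GPU-specific sizing).

```python
import numpy as np

def softmax(z):
    z = z - z.max()
    e = np.exp(z)
    return e / e.sum()

def adaptive_softmax_probs(h, W_head, W_tail):
    """W_head scores V_head frequent words + 1 'tail cluster' slot; W_tail scores
    the V_tail rare words. Returns a distribution over V_head + V_tail words."""
    head = softmax(W_head @ h)                 # size V_head + 1
    tail = softmax(W_tail @ h) * head[-1]      # rare words share the cluster's mass
    return np.concatenate([head[:-1], tail])

rng = np.random.default_rng(0)
h = rng.normal(size=16)                        # hidden state
W_head = rng.normal(size=(8 + 1, 16))          # 8 frequent words + cluster slot
W_tail = rng.normal(size=(100, 16))            # 100 rare words
p = adaptive_softmax_probs(h, W_head, W_tail)
print(p.shape, np.isclose(p.sum(), 1.0))       # (108,) True
```

Because most training tokens are frequent, most steps only touch the small head matrix, which is where the speedup over a full softmax comes from.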
Our approach further reduces the computational cost by exploiting the specificities of modern architectures and matrix-matrix vector operations, making it particularly suited for graphical processing units. Our experiments carried out on standard benchmarks, such as EuroParl and One Billion Word, show that our approach brings a large gain in efficiency over standard approximations while achieving an accuracy close to that of the full softmax. The code of our method is available at https://github.com/facebookresearch/adaptive-softmax.}\n}", "pdf": "http://proceedings.mlr.press/v70/grave17a/grave17a.pdf", "supp": "", "pdf_size": 2160193, "gs_citation": 348, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1046983305372158167&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Facebook AI Research; Facebook AI Research; Facebook AI Research; Facebook AI Research; Facebook AI Research", "aff_domain": "fb.com; ; ; ; ", "email": "fb.com; ; ; ; ", "github": "https://github.com/facebookresearch/adaptive-softmax", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/grave17a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Meta", "aff_unique_dep": "Facebook AI Research", "aff_unique_url": "https://research.facebook.com", "aff_unique_abbr": "FAIR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Emulating the Expert: Inverse Optimization through Online Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/865", "id": "865", "author_site": "Sebastian Pokutta, Andreas B\u00e4rmann, Oskar Schneider", "author": "Andreas B\u00e4rmann; Sebastian Pokutta; Oskar Schneider", "abstract": "In this paper, we demonstrate how to learn the objective function of a decision maker while only observing the problem input data and the decision maker\u2019s corresponding decisions over multiple rounds. Our approach is based on online learning techniques and works for linear objectives over arbitrary sets for which we have a linear optimization oracle and as such generalizes previous work based on KKT-system decomposition and dualization approaches. The applicability of our framework for learning linear constraints is also discussed briefly. Our algorithm converges at a rate of O(1/sqrt(T)), and we demonstrate its effectiveness and applications in preliminary computational results.", "bibtex": "@InProceedings{pmlr-v70-barmann17a,\n title = \t {Emulating the Expert: Inverse Optimization through Online Learning},\n author = {Andreas B{\\\"a}rmann and Sebastian Pokutta and Oskar Schneider},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {400--410},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/barmann17a/barmann17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/barmann17a.html},\n abstract = \t {In this paper, we demonstrate how to learn the objective function of a decision maker while only observing the problem input data and the decision maker\u2019s corresponding decisions over multiple rounds. 
Our approach is based on online learning techniques and works for linear objectives over arbitrary sets for which we have a linear optimization oracle and as such generalizes previous work based on KKT-system decomposition and dualization approaches. The applicability of our framework for learning linear constraints is also discussed briefly. Our algorithm converges at a rate of O(1/sqrt(T)), and we demonstrate its effectiveness and applications in preliminary computational results.}\n}", "pdf": "http://proceedings.mlr.press/v70/barmann17a/barmann17a.pdf", "supp": "", "pdf_size": 591915, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5508778652112284262&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Friedrich-Alexander-Universit\u00e4t Erlangen-N\u00fcrnberg, Erlangen, Germany+Georgia Institute of Technology, Atlanta, USA; Georgia Institute of Technology, Atlanta, USA; Friedrich-Alexander-Universit\u00e4t Erlangen-N\u00fcrnberg, Erlangen, Germany", "aff_domain": "math.uni-erlangen.de;isye.gatech.edu;fau.de", "email": "math.uni-erlangen.de;isye.gatech.edu;fau.de", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/barmann17a.html", "aff_unique_index": "0+1;1;0", "aff_unique_norm": "Friedrich-Alexander-Universit\u00e4t Erlangen-N\u00fcrnberg;Georgia Institute of Technology", "aff_unique_dep": ";", "aff_unique_url": "https://www fau.de;https://www.gatech.edu", "aff_unique_abbr": "FAU;Georgia Tech", "aff_campus_unique_index": "0+1;1;0", "aff_campus_unique": "Erlangen;Atlanta", "aff_country_unique_index": "0+1;1;0", "aff_country_unique": "Germany;United States" }, { "title": "End-to-End Differentiable Adversarial Imitation Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/586", "id": "586", "author_site": "Nir Baram, Oron Anschel, Itai Caspi, Shie Mannor", "author": "Nir Baram; Oron Anschel; Itai Caspi; Shie Mannor", "abstract": "Generative Adversarial Networks (GANs) have been successfully applied to the problem of", "bibtex": "@InProceedings{pmlr-v70-baram17a,\n title = \t {End-to-End Differentiable Adversarial Imitation Learning},\n author = {Nir Baram and Oron Anschel and Itai Caspi and Shie Mannor},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {390--399},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/baram17a/baram17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/baram17a.html},\n abstract = \t {Generative Adversarial Networks (GANs) have been successfully applied to the problem of", "pdf": "http://proceedings.mlr.press/v70/baram17a/baram17a.pdf", "supp": "", "pdf_size": 1678716, "gs_citation": 125, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10694712957678274320&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Technion Institute of Technology; Technion Institute of Technology; Technion Institute of Technology; Technion Institute of Technology", "aff_domain": "campus.technion.ac.il; ; ; ", "email": "campus.technion.ac.il; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/baram17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Technion Institute of Technology", "aff_unique_dep": "", "aff_unique_url": 
"https://www.technion.ac.il/en/", "aff_unique_abbr": "Technion", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Israel" }, { "title": "End-to-End Learning for Structured Prediction Energy Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/836", "id": "836", "author_site": "David Belanger, Bishan Yang, Andrew McCallum", "author": "David Belanger; Bishan Yang; Andrew McCallum", "abstract": "Structured Prediction Energy Networks (SPENs) are a simple, yet expressive family of structured prediction models (Belanger and McCallum, 2016). An energy function over candidate structured outputs is given by a deep network, and predictions are formed by gradient-based optimization. This paper presents end-to-end learning for SPENs, where the energy function is discriminatively trained by back-propagating through gradient-based prediction. In our experience, the approach is substantially more accurate than the structured SVM method of Belanger and McCallum (2016), as it allows us to use more sophisticated non-convex energies. We provide a collection of techniques for improving the speed, accuracy, and memory requirements of end-to-end SPENs, and demonstrate the power of our method on 7-Scenes image denoising and CoNLL-2005 semantic role labeling tasks. In both, inexact minimization of non-convex SPEN energies is superior to baseline methods that use simplistic energy functions that can be minimized exactly.", "bibtex": "@InProceedings{pmlr-v70-belanger17a,\n title = \t {End-to-End Learning for Structured Prediction Energy Networks},\n author = {David Belanger and Bishan Yang and Andrew McCallum},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {429--439},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/belanger17a/belanger17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/belanger17a.html},\n abstract = \t {Structured Prediction Energy Networks (SPENs) are a simple, yet expressive family of structured prediction models (Belanger and McCallum, 2016). An energy function over candidate structured outputs is given by a deep network, and predictions are formed by gradient-based optimization. This paper presents end-to-end learning for SPENs, where the energy function is discriminatively trained by back-propagating through gradient-based prediction. In our experience, the approach is substantially more accurate than the structured SVM method of Belanger and McCallum (2016), as it allows us to use more sophisticated non-convex energies. We provide a collection of techniques for improving the speed, accuracy, and memory requirements of end-to-end SPENs, and demonstrate the power of our method on 7-Scenes image denoising and CoNLL-2005 semantic role labeling tasks. 
In both, inexact minimization of non-convex SPEN energies is superior to baseline methods that use simplistic energy functions that can be minimized exactly.}\n}", "pdf": "http://proceedings.mlr.press/v70/belanger17a/belanger17a.pdf", "supp": "", "pdf_size": 375416, "gs_citation": 151, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=964848304190100776&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "University of Massachusetts, Amherst; Carnegie Mellon University; University of Massachusetts, Amherst", "aff_domain": "cs.umass.edu; ;cs.umass.edu", "email": "cs.umass.edu; ;cs.umass.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/belanger17a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of Massachusetts Amherst;Carnegie Mellon University", "aff_unique_dep": ";", "aff_unique_url": "https://www.umass.edu;https://www.cmu.edu", "aff_unique_abbr": "UMass Amherst;CMU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Amherst;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Enumerating Distinct Decision Trees", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/538", "id": "538", "author": "Salvatore Ruggieri", "abstract": "The search space for the feature selection problem in decision tree learning is the lattice of subsets of the available features. We provide an exact enumeration procedure of the subsets that lead to all and only the distinct decision trees. The procedure can be adopted to prune the search space of complete and heuristics search methods in wrapper models for feature selection. Based on this, we design a computational optimization of the sequential backward elimination heuristics with a performance improvement of up to 100X.", "bibtex": "@InProceedings{pmlr-v70-ruggieri17a,\n title = \t {Enumerating Distinct Decision Trees},\n author = {Salvatore Ruggieri},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2960--2968},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/ruggieri17a/ruggieri17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/ruggieri17a.html},\n abstract = \t {The search space for the feature selection problem in decision tree learning is the lattice of subsets of the available features. We provide an exact enumeration procedure of the subsets that lead to all and only the distinct decision trees. The procedure can be adopted to prune the search space of complete and heuristics search methods in wrapper models for feature selection. 
Based on this, we design a computational optimization of the sequential backward elimination heuristics with a performance improvement of up to 100X.}\n}", "pdf": "http://proceedings.mlr.press/v70/ruggieri17a/ruggieri17a.pdf", "supp": "", "pdf_size": 350647, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4793248235111123204&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "University of Pisa and ISTI-CNR, Pisa, Italy", "aff_domain": "di.unipi.it", "email": "di.unipi.it", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v70/ruggieri17a.html", "aff_unique_index": "0", "aff_unique_norm": "University of Pisa", "aff_unique_dep": "", "aff_unique_url": "https://www.unipi.it", "aff_unique_abbr": "UNipi", "aff_campus_unique_index": "0", "aff_campus_unique": "Pisa", "aff_country_unique_index": "0", "aff_country_unique": "Italy" }, { "title": "Equivariance Through Parameter-Sharing", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/686", "id": "686", "author_site": "Siamak Ravanbakhsh, Jeff Schneider, Barnab\u00e1s P\u00f3czos", "author": "Siamak Ravanbakhsh; Jeff Schneider; Barnab\u00e1s P\u00f3czos", "abstract": "We propose to study equivariance in deep neural networks through parameter symmetries. In particular, given a group G that acts discretely on the input and output of a standard neural network layer, we show that its equivariance is linked to the symmetry group of network parameters. We then propose two parameter-sharing scheme to induce the desirable symmetry on the parameters of the neural network. Under some conditions on the action of G, our procedure for tying the parameters achieves G-equivariance and guarantees sensitivity to all other permutation groups outside of G.", "bibtex": "@InProceedings{pmlr-v70-ravanbakhsh17a,\n title = \t {Equivariance Through Parameter-Sharing},\n author = {Siamak Ravanbakhsh and Jeff Schneider and Barnab{\\'a}s P{\\'o}czos},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2892--2901},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/ravanbakhsh17a/ravanbakhsh17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/ravanbakhsh17a.html},\n abstract = \t {We propose to study equivariance in deep neural networks through parameter symmetries. In particular, given a group G that acts discretely on the input and output of a standard neural network layer, we show that its equivariance is linked to the symmetry group of network parameters. We then propose two parameter-sharing scheme to induce the desirable symmetry on the parameters of the neural network. 
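The thesis of the parameter-sharing entry above, that tying parameters in the right pattern makes a layer equivariant to a group acting on its input and output, is easiest to see in the classical special case of the cyclic-shift group: a circulant weight matrix (every row a cyclic shift of one weight vector) commutes with shifting. The check below demonstrates only this special case, not the paper's general construction.

```python
import numpy as np

def circulant(w):
    """Parameter sharing: every row is a cyclic shift of the same weight vector w."""
    n = len(w)
    return np.stack([np.roll(w, i) for i in range(n)])

rng = np.random.default_rng(0)
w = rng.normal(size=6)
x = rng.normal(size=6)
W = circulant(w)

shift = lambda v, k=1: np.roll(v, k)
# Equivariance: shifting the input then applying the layer equals
# applying the layer then shifting the output.
print(np.allclose(W @ shift(x), shift(W @ x)))   # True
```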
Under some conditions on the action of G, our procedure for tying the parameters achieves G-equivariance and guarantees sensitivity to all other permutation groups outside of G.}\n}", "pdf": "http://proceedings.mlr.press/v70/ravanbakhsh17a/ravanbakhsh17a.pdf", "supp": "", "pdf_size": 623247, "gs_citation": 305, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17269492350417712345&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "School of Computer Science, Carnegie Mellon University; School of Computer Science, Carnegie Mellon University; School of Computer Science, Carnegie Mellon University", "aff_domain": "cs.cmu.edu; ; ", "email": "cs.cmu.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/ravanbakhsh17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "School of Computer Science", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Estimating individual treatment effect: generalization bounds and algorithms", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/790", "id": "790", "author_site": "Uri Shalit, Fredrik D Johansson, David Sontag", "author": "Uri Shalit; Fredrik D. Johansson; David Sontag", "abstract": "There is intense interest in applying machine learning to problems of causal inference in fields such as healthcare, economics and education. In particular, individual-level causal inference has important applications such as precision medicine. We give a new theoretical analysis and family of algorithms for predicting individual treatment effect (ITE) from observational data, under the assumption known as strong ignorability. The algorithms learn a \u201cbalanced\u201d representation such that the induced treated and control distributions look similar, and we give a novel and intuitive generalization-error bound showing the expected ITE estimation error of a representation is bounded by a sum of the standard generalization-error of that representation and the distance between the treated and control distributions induced by the representation. We use Integral Probability Metrics to measure distances between distributions, deriving explicit bounds for the Wasserstein and Maximum Mean Discrepancy (MMD) distances. Experiments on real and simulated data show the new algorithms match or outperform the state-of-the-art.", "bibtex": "@InProceedings{pmlr-v70-shalit17a,\n title = \t {Estimating individual treatment effect: generalization bounds and algorithms},\n author = {Uri Shalit and Fredrik D. Johansson and David Sontag},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3076--3085},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/shalit17a/shalit17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/shalit17a.html},\n abstract = \t {There is intense interest in applying machine learning to problems of causal inference in fields such as healthcare, economics and education. In particular, individual-level causal inference has important applications such as precision medicine. 
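The objective shape described in this ITE entry, a factual prediction loss plus a penalty on the distance between treated and control representations, can be sketched as follows. The distance here is the simplest possible IPM surrogate (a linear-kernel MMD, i.e. the squared gap between representation means); the paper derives bounds for Wasserstein and full MMD distances. Names and data are hypothetical, and this is a sketch of the loss, not the authors' training code.

```python
import numpy as np

def linear_mmd(phi_treated, phi_control):
    """Squared distance between representation means (simplest IPM surrogate)."""
    return np.sum((phi_treated.mean(axis=0) - phi_control.mean(axis=0)) ** 2)

def cfr_objective(y, t, phi, y_hat, alpha=1.0):
    """Factual MSE + alpha * distributional distance between treated/control reps."""
    factual = np.mean((y - y_hat) ** 2)
    balance = linear_mmd(phi[t == 1], phi[t == 0])
    return factual + alpha * balance

rng = np.random.default_rng(0)
n, d = 100, 8
phi = rng.normal(size=(n, d))        # learned representation Phi(x)
t = rng.integers(0, 2, size=n)       # treatment indicator
y = rng.normal(size=n)               # observed (factual) outcomes
y_hat = rng.normal(size=n)           # predictions h(Phi(x), t)
print(round(cfr_objective(y, t, phi, y_hat, alpha=0.5), 3))
```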
We give a new theoretical analysis and family of algorithms for predicting individual treatment effect (ITE) from observational data, under the assumption known as strong ignorability. The algorithms learn a \u201cbalanced\u201d representation such that the induced treated and control distributions look similar, and we give a novel and intuitive generalization-error bound showing the expected ITE estimation error of a representation is bounded by a sum of the standard generalization-error of that representation and the distance between the treated and control distributions induced by the representation. We use Integral Probability Metrics to measure distances between distributions, deriving explicit bounds for the Wasserstein and Maximum Mean Discrepancy (MMD) distances. Experiments on real and simulated data show the new algorithms match or outperform the state-of-the-art.}\n}", "pdf": "http://proceedings.mlr.press/v70/shalit17a/shalit17a.pdf", "supp": "", "pdf_size": 501240, "gs_citation": 1331, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6551680371514968121&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "CIMS, New York University, New York, NY 10003; IMES, MIT, Cambridge, MA 02142 + CSAIL, MIT, Cambridge, MA 02139; CSAIL, MIT, Cambridge, MA 02139", "aff_domain": "cs.nyu.edu;mit.edu;csail.mit.edu", "email": "cs.nyu.edu;mit.edu;csail.mit.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/shalit17a.html", "aff_unique_index": "0;1+1;1", "aff_unique_norm": "New York University;Massachusetts Institute of Technology", "aff_unique_dep": "Courant Institute of Mathematical Sciences;IMES", "aff_unique_url": "https://www.nyu.edu;https://www.mit.edu", "aff_unique_abbr": "NYU;MIT", "aff_campus_unique_index": "0;1+1;1", "aff_campus_unique": "New York;Cambridge", "aff_country_unique_index": "0;0+0;0", "aff_country_unique": "United States" }, { "title": "Estimating the unseen from multiple populations", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/885", "id": "885", "author_site": "Aditi Raghunathan, Greg Valiant, James Zou", "author": "Aditi Raghunathan; Gregory Valiant; James Zou", "abstract": "Given samples from a distribution, how many new elements should we expect to find if we keep on sampling this distribution? This is an important and actively studied problem, with many applications ranging from species estimation to genomics. We generalize this extrapolation and related unseen estimation problems to the multiple population setting, where population $j$ has an unknown distribution $D_j$ from which we observe $n_j$ samples. We derive an optimal estimator for the total number of elements we expect to find among new samples across the populations. Surprisingly, we prove that our estimator\u2019s accuracy is independent of the number of populations. We also develop an efficient optimization algorithm to solve the more general problem of estimating multi-population frequency distributions. We validate our methods and theory through extensive experiments. 
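For background on the extrapolation problem in this unseen-estimation entry, the classical single-population Good-Toulmin estimator predicts how many new elements will appear among further samples from the "fingerprint" Phi_k (the number of elements observed exactly k times). The snippet below implements that classical estimator only; the paper's contribution is the multi-population generalisation, which this sketch does not attempt.

```python
from collections import Counter

def good_toulmin(samples, t):
    """Good-Toulmin estimate of the expected number of NEW elements among
    t * n further samples (t <= 1), from the fingerprint Phi_k."""
    phi = Counter(Counter(samples).values())          # k -> Phi_k
    return sum((-1) ** (k + 1) * (t ** k) * count for k, count in phi.items())

samples = list("aaabbcddddde")                   # 6 distinct elements in 12 draws
print(round(good_toulmin(samples, t=1.0), 3))    # expected new elements in 12 more draws
```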
Finally, on a real dataset of human genomes across multiple ancestries, we demonstrate how our approach for unseen estimation can enable cohort designs that can discover interesting mutations with greater efficiency.", "bibtex": "@InProceedings{pmlr-v70-raghunathan17a,\n title = \t {Estimating the unseen from multiple populations},\n author = {Aditi Raghunathan and Gregory Valiant and James Zou},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2855--2863},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/raghunathan17a/raghunathan17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/raghunathan17a.html},\n abstract = \t {Given samples from a distribution, how many new elements should we expect to find if we keep on sampling this distribution? This is an important and actively studied problem, with many applications ranging from species estimation to genomics. We generalize this extrapolation and related unseen estimation problems to the multiple population setting, where population $j$ has an unknown distribution $D_j$ from which we observe $n_j$ samples. We derive an optimal estimator for the total number of elements we expect to find among new samples across the populations. Surprisingly, we prove that our estimator\u2019s accuracy is independent of the number of populations. We also develop an efficient optimization algorithm to solve the more general problem of estimating multi-population frequency distributions. We validate our methods and theory through extensive experiments. Finally, on a real dataset of human genomes across multiple ancestries, we demonstrate how our approach for unseen estimation can enable cohort designs that can discover interesting mutations with greater efficiency.}\n}", "pdf": "http://proceedings.mlr.press/v70/raghunathan17a/raghunathan17a.pdf", "supp": "", "pdf_size": 1055101, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2219039707642364162&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Stanford University; Stanford University; Stanford University + Chan Zuckerberg Biohub", "aff_domain": "stanford.edu;stanford.edu;stanford.edu", "email": "stanford.edu;stanford.edu;stanford.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/raghunathan17a.html", "aff_unique_index": "0;0;0+1", "aff_unique_norm": "Stanford University;Chan Zuckerberg Biohub", "aff_unique_dep": ";", "aff_unique_url": "https://www.stanford.edu;https://www.chanzuckerberg.com/science/biohub", "aff_unique_abbr": "Stanford;", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Stanford;", "aff_country_unique_index": "0;0;0+0", "aff_country_unique": "United States" }, { "title": "Evaluating Bayesian Models with Posterior Dispersion Indices", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/514", "id": "514", "author_site": "Alp Kucukelbir, Yixin Wang, David Blei", "author": "Alp Kucukelbir; Yixin Wang; David M. Blei", "abstract": "Probabilistic modeling is cyclical: we specify a model, infer its posterior, and evaluate its performance. Evaluation drives the cycle, as we revise our model based on how it performs. This requires a metric. Traditionally, predictive accuracy prevails. 
Yet, predictive accuracy does not tell the whole story. We propose to evaluate a model through posterior dispersion. The idea is to analyze how each datapoint fares in relation to posterior uncertainty around the hidden structure. This highlights datapoints the model struggles to explain and provides complimentary insight to datapoints with low predictive accuracy. We present a family of posterior dispersion indices (PDI) that capture this idea. We show how a PDI identifies patterns of model mismatch in three real data examples: voting preferences, supermarket shopping, and population genetics.", "bibtex": "@InProceedings{pmlr-v70-kucukelbir17a,\n title = \t {Evaluating {B}ayesian Models with Posterior Dispersion Indices},\n author = {Alp Kucukelbir and Yixin Wang and David M. Blei},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1925--1934},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/kucukelbir17a/kucukelbir17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/kucukelbir17a.html},\n abstract = \t {Probabilistic modeling is cyclical: we specify a model, infer its posterior, and evaluate its performance. Evaluation drives the cycle, as we revise our model based on how it performs. This requires a metric. Traditionally, predictive accuracy prevails. Yet, predictive accuracy does not tell the whole story. We propose to evaluate a model through posterior dispersion. The idea is to analyze how each datapoint fares in relation to posterior uncertainty around the hidden structure. This highlights datapoints the model struggles to explain and provides complimentary insight to datapoints with low predictive accuracy. We present a family of posterior dispersion indices (PDI) that capture this idea. We show how a PDI identifies patterns of model mismatch in three real data examples: voting preferences, supermarket shopping, and population genetics.}\n}", "pdf": "http://proceedings.mlr.press/v70/kucukelbir17a/kucukelbir17a.pdf", "supp": "", "pdf_size": 255908, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=775447223528253510&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Columbia University; Columbia University; Columbia University", "aff_domain": "cs.columbia.edu; ; ", "email": "cs.columbia.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/kucukelbir17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Columbia University", "aff_unique_dep": "", "aff_unique_url": "https://www.columbia.edu", "aff_unique_abbr": "Columbia", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Evaluating the Variance of Likelihood-Ratio Gradient Estimators", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/608", "id": "608", "author_site": "Seiya Tokui, Issei Sato", "author": "Seiya Tokui; Issei Sato", "abstract": "The likelihood-ratio method is often used to estimate gradients of stochastic computations, for which baselines are required to reduce the estimation variance. Many types of baselines have been proposed, although their degree of optimality is not well understood. 
In this study, we establish a novel framework of gradient estimation that includes most of the common gradient estimators as special cases. The framework gives a natural derivation of the optimal estimator that can be interpreted as a special case of the likelihood-ratio method so that we can evaluate the optimal degree of practical techniques with it. It bridges the likelihood-ratio method and the reparameterization trick while still supporting discrete variables. It is derived from the exchange property of the differentiation and integration. To be more specific, it is derived by the reparameterization trick and local marginalization analogous to the local expectation gradient. We evaluate various baselines and the optimal estimator for variational learning and show that the performance of the modern estimators is close to the optimal estimator.", "bibtex": "@InProceedings{pmlr-v70-tokui17a,\n title = \t {Evaluating the Variance of Likelihood-Ratio Gradient Estimators},\n author = {Seiya Tokui and Issei Sato},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3414--3423},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/tokui17a/tokui17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/tokui17a.html},\n abstract = \t {The likelihood-ratio method is often used to estimate gradients of stochastic computations, for which baselines are required to reduce the estimation variance. Many types of baselines have been proposed, although their degree of optimality is not well understood. In this study, we establish a novel framework of gradient estimation that includes most of the common gradient estimators as special cases. The framework gives a natural derivation of the optimal estimator that can be interpreted as a special case of the likelihood-ratio method so that we can evaluate the optimal degree of practical techniques with it. It bridges the likelihood-ratio method and the reparameterization trick while still supporting discrete variables. It is derived from the exchange property of the differentiation and integration. To be more specific, it is derived by the reparameterization trick and local marginalization analogous to the local expectation gradient. 
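The variance gap this entry analyses can be seen directly in a toy Gaussian example: estimate the gradient of E[z^2] with z ~ N(theta, 1) using the likelihood-ratio (score-function) estimator, the same estimator with a simple baseline, and the reparameterization estimator. All three are standard textbook estimators shown for illustration, not the framework proposed in the paper; the true gradient is 2*theta.

```python
import numpy as np

rng = np.random.default_rng(0)
theta, n = 1.5, 200_000
eps = rng.normal(size=n)
z = theta + eps                      # z ~ N(theta, 1)
f = z ** 2                           # objective: grad_theta E[z^2] = 2 * theta = 3

score = z - theta                    # d/dtheta log N(z; theta, 1)
lr_plain = f * score                             # likelihood-ratio estimator
lr_baseline = (f - f.mean()) * score             # simple (near-)constant baseline
reparam = 2.0 * z                                # reparameterization estimator

for name, g in [("LR", lr_plain), ("LR + baseline", lr_baseline), ("reparam", reparam)]:
    print(f"{name:14s} mean={g.mean():.3f}  var={g.var():.2f}")
# All means are close to 3, but the variances differ substantially, which is
# the kind of gap a variance analysis of these estimators quantifies.
```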
We evaluate various baselines and the optimal estimator for variational learning and show that the performance of the modern estimators is close to the optimal estimator.}\n}", "pdf": "http://proceedings.mlr.press/v70/tokui17a/tokui17a.pdf", "supp": "", "pdf_size": 663198, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2700518502045291483&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Preferred Networks, Tokyo, Japan+The University of Tokyo, Tokyo, Japan; The University of Tokyo, Tokyo, Japan+RIKEN, Tokyo, Japan", "aff_domain": "preferred.jp;k.u-tokyo.ac.jp", "email": "preferred.jp;k.u-tokyo.ac.jp", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/tokui17a.html", "aff_unique_index": "0+1;1+2", "aff_unique_norm": "Preferred Networks;University of Tokyo;RIKEN", "aff_unique_dep": ";;", "aff_unique_url": "https://www.preferred-networks.com;https://www.u-tokyo.ac.jp;https://www.riken.jp", "aff_unique_abbr": ";UTokyo;RIKEN", "aff_campus_unique_index": "0+0;0+0", "aff_campus_unique": "Tokyo", "aff_country_unique_index": "0+0;0+0", "aff_country_unique": "Japan" }, { "title": "Exact Inference for Integer Latent-Variable Models", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/771", "id": "771", "author_site": "Kevin Winner, Debora Sujono, Daniel Sheldon", "author": "Kevin Winner; Debora Sujono; Dan Sheldon", "abstract": "Graphical models with latent count variables arise in a number of areas. However, standard inference algorithms do not apply to these models due to the infinite support of the latent variables.
Winner and Sheldon (2016) recently developed a new technique using probability generating functions (PGFs) to perform efficient, exact inference for certain Poisson latent variable models. However, the method relies on symbolic manipulation of PGFs, and it is unclear whether this can be extended to more general models. In this paper we introduce a new approach for inference with PGFs: instead of manipulating PGFs symbolically, we adapt techniques from the autodiff literature to compute the higher-order derivatives necessary for inference. This substantially generalizes the class of models for which efficient, exact inference algorithms are available. Specifically, our results apply to a class of models that includes branching processes, which are widely used in applied mathematics and population ecology, and autoregressive models for integer data. Experiments show that our techniques are more scalable than existing approximate methods and enable new applications.}\n}", "pdf": "http://proceedings.mlr.press/v70/winner17a/winner17a.pdf", "supp": "", "pdf_size": 1827917, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18092438335933601664&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "College of Information and Computer Sciences, University of Massachusetts Amherst; College of Information and Computer Sciences, University of Massachusetts Amherst; College of Information and Computer Sciences, University of Massachusetts Amherst + Department of Computer Science, Mount Holyoke College", "aff_domain": "cs.umass.edu; ; ", "email": "cs.umass.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/winner17a.html", "aff_unique_index": "0;0;0+1", "aff_unique_norm": "University of Massachusetts Amherst;Mount Holyoke College", "aff_unique_dep": "College of Information and Computer Sciences;Department of Computer Science", "aff_unique_url": "https://www.umass.edu;https://www.mtholyoke.edu", "aff_unique_abbr": "UMass Amherst;MHC", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Amherst;", "aff_country_unique_index": "0;0;0+0", "aff_country_unique": "United States" }, { "title": "Exact MAP Inference by Avoiding Fractional Vertices", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/729", "id": "729", "author_site": "Erik Lindgren, Alexandros Dimakis, Adam Klivans", "author": "Erik M. Lindgren; Alexandros G. Dimakis; Adam Klivans", "abstract": "Given a graphical model, one essential problem is MAP inference, that is, finding the most likely configuration of states according to the model. Although this problem is NP-hard, large instances can be solved in practice and it is a major open question to explain why this is true. We give a natural condition under which we can provably perform MAP inference in polynomial time\u2014we require that the number of fractional vertices in the LP relaxation exceeding the optimal solution is bounded by a polynomial in the problem size. This resolves an open question by Dimakis, Gohari, and Wainwright. In contrast, for general LP relaxations of integer programs, known techniques can only handle a constant number of fractional vertices whose value exceeds the optimal solution.
We experimentally verify this condition and demonstrate how efficient various integer programming methods are at removing fractional solutions.", "bibtex": "@InProceedings{pmlr-v70-lindgren17a,\n title = \t {Exact {MAP} Inference by Avoiding Fractional Vertices},\n author = {Erik M. Lindgren and Alexandros G. Dimakis and Adam Klivans},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2120--2129},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/lindgren17a/lindgren17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/lindgren17a.html},\n abstract = \t {Given a graphical model, one essential problem is MAP inference, that is, finding the most likely configuration of states according to the model. Although this problem is NP-hard, large instances can be solved in practice and it is a major open question to explain why this is true. We give a natural condition under which we can provably perform MAP inference in polynomial time\u2014we require that the number of fractional vertices in the LP relaxation exceeding the optimal solution is bounded by a polynomial in the problem size. This resolves an open question by Dimakis, Gohari, and Wainwright. In contrast, for general LP relaxations of integer programs, known techniques can only handle a constant number of fractional vertices whose value exceeds the optimal solution. We experimentally verify this condition and demonstrate how efficient various integer programming methods are at removing fractional solutions.}\n}", "pdf": "http://proceedings.mlr.press/v70/lindgren17a/lindgren17a.pdf", "supp": "", "pdf_size": 283262, "gs_citation": 4, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5494193698352832868&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Electrical and Computer Engineering, University of Texas at Austin, USA+Department of Computer Science, University of Texas at Austin, USA; Department of Electrical and Computer Engineering, University of Texas at Austin, USA+Department of Computer Science, University of Texas at Austin, USA; Department of Computer Science, University of Texas at Austin, USA", "aff_domain": "utexas.edu;austin.utexas.edu;cs.utexas.edu", "email": "utexas.edu;austin.utexas.edu;cs.utexas.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/lindgren17a.html", "aff_unique_index": "0+0;0+0;0", "aff_unique_norm": "University of Texas at Austin", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.utexas.edu", "aff_unique_abbr": "UT Austin", "aff_campus_unique_index": "0+0;0+0;0", "aff_campus_unique": "Austin", "aff_country_unique_index": "0+0;0+0;0", "aff_country_unique": "United States" }, { "title": "Exploiting Strong Convexity from Data with Primal-Dual First-Order Algorithms", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/863", "id": "863", "author_site": "Jialei Wang, Lin Xiao", "author": "Jialei Wang; Lin Xiao", "abstract": "We consider empirical risk minimization of linear predictors with convex loss functions. Such problems can be reformulated as convex-concave saddle point problems and solved by primal-dual first-order algorithms.
However, primal-dual algorithms often require explicit strongly convex regularization in order to obtain fast linear convergence, and the required dual proximal mapping may not admit closed-form or efficient solution. In this paper, we develop both batch and randomized primal-dual algorithms that can exploit strong convexity from data adaptively and are capable of achieving linear convergence even without regularization. We also present dual-free variants of adaptive primal-dual algorithms that do not need the dual proximal mapping, which are especially suitable for logistic regression.", "bibtex": "@InProceedings{pmlr-v70-wang17l,\n title = \t {Exploiting Strong Convexity from Data with Primal-Dual First-Order Algorithms},\n author = {Jialei Wang and Lin Xiao},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3694--3702},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/wang17l/wang17l.pdf},\n url = \t {https://proceedings.mlr.press/v70/wang17l.html},\n abstract = \t {We consider empirical risk minimization of linear predictors with convex loss functions. Such problems can be reformulated as convex-concave saddle point problems and solved by primal-dual first-order algorithms. However, primal-dual algorithms often require explicit strongly convex regularization in order to obtain fast linear convergence, and the required dual proximal mapping may not admit closed-form or efficient solution. In this paper, we develop both batch and randomized primal-dual algorithms that can exploit strong convexity from data adaptively and are capable of achieving linear convergence even without regularization. We also present dual-free variants of adaptive primal-dual algorithms that do not need the dual proximal mapping, which are especially suitable for logistic regression.}\n}", "pdf": "http://proceedings.mlr.press/v70/wang17l/wang17l.pdf", "supp": "", "pdf_size": 447108, "gs_citation": 49, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4014215810594889034&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Computer Science, The University of Chicago, Chicago, Illinois 60637, USA; Microsoft Research, Redmond, Washington 98052, USA", "aff_domain": "uchicago.edu;microsoft.com", "email": "uchicago.edu;microsoft.com", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/wang17l.html", "aff_unique_index": "0;1", "aff_unique_norm": "University of Chicago;Microsoft", "aff_unique_dep": "Department of Computer Science;Microsoft Research", "aff_unique_url": "https://www.uchicago.edu;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "UChicago;MSR", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Chicago;Redmond", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Failures of Gradient-Based Deep Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/694", "id": "694", "author_site": "Shaked Shammah, Shai Shalev-Shwartz, Ohad Shamir", "author": "Shai Shalev-Shwartz; Ohad Shamir; Shaked Shammah", "abstract": "In recent years, Deep Learning has become the go-to solution for a broad range of applications, often outperforming state-of-the-art. 
However, it is important, for both theoreticians and practitioners, to gain a deeper understanding of the difficulties and limitations associated with common approaches and algorithms. We describe four types of simple problems, for which the gradient-based algorithms commonly used in deep learning either fail or suffer from significant difficulties. We illustrate the failures through practical experiments, and provide theoretical insights explaining their source, and how they might be remedied.", "bibtex": "@InProceedings{pmlr-v70-shalev-shwartz17a,\n title = \t {Failures of Gradient-Based Deep Learning},\n author = {Shai Shalev-Shwartz and Ohad Shamir and Shaked Shammah},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3067--3075},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/shalev-shwartz17a/shalev-shwartz17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/shalev-shwartz17a.html},\n abstract = \t {In recent years, Deep Learning has become the go-to solution for a broad range of applications, often outperforming state-of-the-art. However, it is important, for both theoreticians and practitioners, to gain a deeper understanding of the difficulties and limitations associated with common approaches and algorithms. We describe four types of simple problems, for which the gradient-based algorithms commonly used in deep learning either fail or suffer from significant difficulties. We illustrate the failures through practical experiments, and provide theoretical insights explaining their source, and how they might be remedied.}\n}", "pdf": "http://proceedings.mlr.press/v70/shalev-shwartz17a/shalev-shwartz17a.pdf", "supp": "", "pdf_size": 567567, "gs_citation": 249, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16521777684443643145&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "School of Computer Science and Engineering, The Hebrew University; Weizmann Institute of Science; School of Computer Science and Engineering, The Hebrew University", "aff_domain": "cs.huji.ac.il;weizmann.ac.il;mail.huji.ac.il", "email": "cs.huji.ac.il;weizmann.ac.il;mail.huji.ac.il", "github": "https://github.com/shakedshammah/failures_of_DL", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/shalev-shwartz17a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Hebrew University;Weizmann Institute of Science", "aff_unique_dep": "School of Computer Science and Engineering;", "aff_unique_url": "http://www.huji.ac.il;https://www.weizmann.org.il", "aff_unique_abbr": "HUJI;Weizmann", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Israel" }, { "title": "Fairness in Reinforcement Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/697", "id": "697", "author_site": "Shahin Jabbari, Matthew Joseph, Michael Kearns, Jamie Morgenstern, Aaron Roth", "author": "Shahin Jabbari; Matthew Joseph; Michael Kearns; Jamie Morgenstern; Aaron Roth", "abstract": "We initiate the study of fairness in reinforcement learning, where the actions of a learning algorithm may affect its environment and future rewards. 
Our fairness constraint requires that an algorithm never prefers one action over another if the long-term (discounted) reward of choosing the latter action is higher. Our first result is negative: despite the fact that fairness is consistent with the optimal policy, any learning algorithm satisfying fairness must take time exponential in the number of states to achieve non-trivial approximation to the optimal policy. We then provide a provably fair polynomial time algorithm under an approximate notion of fairness, thus establishing an exponential gap between exact and approximate fairness.", "bibtex": "@InProceedings{pmlr-v70-jabbari17a,\n title = \t {Fairness in Reinforcement Learning},\n author = {Shahin Jabbari and Matthew Joseph and Michael Kearns and Jamie Morgenstern and Aaron Roth},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1617--1626},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/jabbari17a/jabbari17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/jabbari17a.html},\n abstract = \t {We initiate the study of fairness in reinforcement learning, where the actions of a learning algorithm may affect its environment and future rewards. Our fairness constraint requires that an algorithm never prefers one action over another if the long-term (discounted) reward of choosing the latter action is higher. Our first result is negative: despite the fact that fairness is consistent with the optimal policy, any learning algorithm satisfying fairness must take time exponential in the number of states to achieve non-trivial approximation to the optimal policy. 
We then provide a provably fair polynomial time algorithm under an approximate notion of fairness, thus establishing an exponential gap between exact and approximate fairness.}\n}", "pdf": "http://proceedings.mlr.press/v70/jabbari17a/jabbari17a.pdf", "supp": "", "pdf_size": 543715, "gs_citation": 241, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14022912263227076978&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "University of Pennsylvania; University of Pennsylvania; University of Pennsylvania; University of Pennsylvania; University of Pennsylvania", "aff_domain": "cis.upenn.edu;cis.upenn.edu;cis.upenn.edu;cis.upenn.edu;cis.upenn.edu", "email": "cis.upenn.edu;cis.upenn.edu;cis.upenn.edu;cis.upenn.edu;cis.upenn.edu", "github": "", "project": "https://arxiv.org/pdf/1611.03071.pdf", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/jabbari17a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "University of Pennsylvania", "aff_unique_dep": "", "aff_unique_url": "https://www.upenn.edu", "aff_unique_abbr": "UPenn", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Fake News Mitigation via Point Process Based Intervention", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/857", "id": "857", "author_site": "Mehrdad Farajtabar, Jiachen Yang, Xiaojing Ye, Huan Xu, Rakshit Trivedi, Elias Khalil, Shuang Li, Le Song, Hongyuan Zha", "author": "Mehrdad Farajtabar; Jiachen Yang; Xiaojing Ye; Huan Xu; Rakshit Trivedi; Elias Khalil; Shuang Li; Le Song; Hongyuan Zha", "abstract": "We propose the first multistage intervention framework that tackles fake news in social networks by combining reinforcement learning with a point process network activity model. The spread of fake news and mitigation events within the network is modeled by a multivariate Hawkes process with additional exogenous control terms. By choosing a feature representation of states, defining mitigation actions and constructing reward functions to measure the effectiveness of mitigation activities, we map the problem of fake news mitigation into the reinforcement learning framework. We develop a policy iteration method unique to the multivariate networked point process, with the goal of optimizing the actions for maximal reward under budget constraints. Our method shows promising performance in real-time intervention experiments on a Twitter network to mitigate a surrogate fake news campaign, and outperforms alternatives on synthetic datasets.", "bibtex": "@InProceedings{pmlr-v70-farajtabar17a,\n title = \t {Fake News Mitigation via Point Process Based Intervention},\n author = {Mehrdad Farajtabar and Jiachen Yang and Xiaojing Ye and Huan Xu and Rakshit Trivedi and Elias Khalil and Shuang Li and Le Song and Hongyuan Zha},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1097--1106},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/farajtabar17a/farajtabar17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/farajtabar17a.html},\n abstract = \t {We propose the first multistage intervention framework that tackles fake news in social networks by combining reinforcement learning with a point process network activity model. 
The spread of fake news and mitigation events within the network is modeled by a multivariate Hawkes process with additional exogenous control terms. By choosing a feature representation of states, defining mitigation actions and constructing reward functions to measure the effectiveness of mitigation activities, we map the problem of fake news mitigation into the reinforcement learning framework. We develop a policy iteration method unique to the multivariate networked point process, with the goal of optimizing the actions for maximal reward under budget constraints. Our method shows promising performance in real-time intervention experiments on a Twitter network to mitigate a surrogate fake news campaign, and outperforms alternatives on synthetic datasets.}\n}", "pdf": "http://proceedings.mlr.press/v70/farajtabar17a/farajtabar17a.pdf", "supp": "", "pdf_size": 839879, "gs_citation": 222, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17197173349668318810&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "School of Computational Science and Engineering, Georgia Tech; School of Computational Science and Engineering, Georgia Tech; Department of Mathematics and Statistics, Georgia State University; School of Industrial and Systems Engineering, Georgia Tech; School of Computational Science and Engineering, Georgia Tech; School of Computational Science and Engineering, Georgia Tech; School of Industrial and Systems Engineering, Georgia Tech; School of Computational Science and Engineering, Georgia Tech; School of Computational Science and Engineering, Georgia Tech", "aff_domain": "gatech.edu; ; ; ; ; ; ; ; ", "email": "gatech.edu; ; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 9, "oa": "https://proceedings.mlr.press/v70/farajtabar17a.html", "aff_unique_index": "0;0;1;0;0;0;0;0;0", "aff_unique_norm": "Georgia Institute of Technology;Georgia State University", "aff_unique_dep": "School of Computational Science and Engineering;Department of Mathematics and Statistics", "aff_unique_url": "https://www.gatech.edu;https://www.gsu.edu", "aff_unique_abbr": "Georgia Tech;GSU", "aff_campus_unique_index": "0;0;0;0;0;0;0;0", "aff_campus_unique": "Atlanta;", "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Fast Bayesian Intensity Estimation for the Permanental Process", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/622", "id": "622", "author_site": "Christian Walder, Adrian N Bishop", "author": "Christian J. Walder; Adrian N. Bishop", "abstract": "The Cox process is a stochastic process which generalises the Poisson process by letting the underlying intensity function itself be a stochastic process. In this paper we present a fast Bayesian inference scheme for the permanental process, a Cox process under which the square root of the intensity is a Gaussian process. In particular we exploit connections with reproducing kernel Hilbert spaces, to derive efficient approximate Bayesian inference algorithms based on the Laplace approximation to the predictive distribution and marginal likelihood. We obtain a simple algorithm which we apply to toy and real-world problems, obtaining orders of magnitude speed improvements over previous work.", "bibtex": "@InProceedings{pmlr-v70-walder17a,\n title = \t {Fast {B}ayesian Intensity Estimation for the Permanental Process},\n author = {Christian J. Walder and Adrian N. 
Bishop},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3579--3588},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/walder17a/walder17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/walder17a.html},\n abstract = \t {The Cox process is a stochastic process which generalises the Poisson process by letting the underlying intensity function itself be a stochastic process. In this paper we present a fast Bayesian inference scheme for the permanental process, a Cox process under which the square root of the intensity is a Gaussian process. In particular we exploit connections with reproducing kernel Hilbert spaces, to derive efficient approximate Bayesian inference algorithms based on the Laplace approximation to the predictive distribution and marginal likelihood. We obtain a simple algorithm which we apply to toy and real-world problems, obtaining orders of magnitude speed improvements over previous work.}\n}", "pdf": "http://proceedings.mlr.press/v70/walder17a/walder17a.pdf", "supp": "", "pdf_size": 2758926, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1744621954281673801&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Data61, CSIRO, Australia+The Australian National University; Data61, CSIRO, Australia+The Australian National University+University of Technology Sydney", "aff_domain": "anu.edu.au; ", "email": "anu.edu.au; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/walder17a.html", "aff_unique_index": "0+1;0+1+2", "aff_unique_norm": "CSIRO;Australian National University;University of Technology Sydney", "aff_unique_dep": "Data61;;", "aff_unique_url": "https://www.csiro.au;https://www.anu.edu.au;https://www.uts.edu.au", "aff_unique_abbr": "CSIRO;ANU;UTS", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0+0+0", "aff_country_unique": "Australia" }, { "title": "Fast k-Nearest Neighbour Search via Prioritized DCI", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/468", "id": "468", "author_site": "Ke Li, Jitendra Malik", "author": "Ke Li; Jitendra Malik", "abstract": "Most exact methods for k-nearest neighbour search suffer from the curse of dimensionality; that is, their query times exhibit exponential dependence on either the ambient or the intrinsic dimensionality. Dynamic Continuous Indexing (DCI) offers a promising way of circumventing the curse and successfully reduces the dependence of query time on intrinsic dimensionality from exponential to sublinear. In this paper, we propose a variant of DCI, which we call Prioritized DCI, and show a remarkable improvement in the dependence of query time on intrinsic dimensionality. In particular, a linear increase in intrinsic dimensionality, or equivalently, an exponential increase in the number of points near a query, can be mostly counteracted with just a linear increase in space. We also demonstrate empirically that Prioritized DCI significantly outperforms prior methods. 
In particular, relative to Locality-Sensitive Hashing (LSH), Prioritized DCI reduces the number of distance evaluations by a factor of 14 to 116 and the memory consumption by a factor of 21.", "bibtex": "@InProceedings{pmlr-v70-li17d,\n title = \t {Fast k-Nearest Neighbour Search via Prioritized {DCI}},\n author = {Ke Li and Jitendra Malik},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2081--2090},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/li17d/li17d.pdf},\n url = \t {https://proceedings.mlr.press/v70/li17d.html},\n abstract = \t {Most exact methods for k-nearest neighbour search suffer from the curse of dimensionality; that is, their query times exhibit exponential dependence on either the ambient or the intrinsic dimensionality. Dynamic Continuous Indexing (DCI) offers a promising way of circumventing the curse and successfully reduces the dependence of query time on intrinsic dimensionality from exponential to sublinear. In this paper, we propose a variant of DCI, which we call Prioritized DCI, and show a remarkable improvement in the dependence of query time on intrinsic dimensionality. In particular, a linear increase in intrinsic dimensionality, or equivalently, an exponential increase in the number of points near a query, can be mostly counteracted with just a linear increase in space. We also demonstrate empirically that Prioritized DCI significantly outperforms prior methods. In particular, relative to Locality-Sensitive Hashing (LSH), Prioritized DCI reduces the number of distance evaluations by a factor of 14 to 116 and the memory consumption by a factor of 21.}\n}", "pdf": "http://proceedings.mlr.press/v70/li17d/li17d.pdf", "supp": "", "pdf_size": 803089, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1119587906302137606&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "University of California, Berkeley, CA 94720, United States; University of California, Berkeley, CA 94720, United States", "aff_domain": "eecs.berkeley.edu; ", "email": "eecs.berkeley.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/li17d.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Faster Greedy MAP Inference for Determinantal Point Processes", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/638", "id": "638", "author_site": "Insu Han, Prabhanjan Kambadur, Kyoungsoo Park, Jinwoo Shin", "author": "Insu Han; Prabhanjan Kambadur; Kyoungsoo Park; Jinwoo Shin", "abstract": "Determinantal point processes (DPPs) are popular probabilistic models that arise in many machine learning tasks, where distributions of diverse sets are characterized by determinants of their features. In this paper, we develop fast algorithms to find the most likely configuration (MAP) of large-scale DPPs, which is NP-hard in general. Due to the submodular nature of the MAP objective, greedy algorithms have been used with empirical success. 
Greedy implementations require computation of log-determinants, matrix inverses or solving linear systems at each iteration. We present faster implementations of the greedy algorithms by utilizing the orthogonal benefits of two log-determinant approximation schemes: (a) first-order expansions to the matrix log-determinant function and (b) high-order expansions to the scalar log function with stochastic trace estimators. In our experiments, our algorithms are orders of magnitude faster than their competitors, while sacrificing marginal accuracy.", "bibtex": "@InProceedings{pmlr-v70-han17a,\n title = \t {Faster Greedy {MAP} Inference for Determinantal Point Processes},\n author = {Insu Han and Prabhanjan Kambadur and Kyoungsoo Park and Jinwoo Shin},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1384--1393},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/han17a/han17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/han17a.html},\n abstract = \t {Determinantal point processes (DPPs) are popular probabilistic models that arise in many machine learning tasks, where distributions of diverse sets are characterized by determinants of their features. In this paper, we develop fast algorithms to find the most likely configuration (MAP) of large-scale DPPs, which is NP-hard in general. Due to the submodular nature of the MAP objective, greedy algorithms have been used with empirical success. Greedy implementations require computation of log-determinants, matrix inverses or solving linear systems at each iteration. We present faster implementations of the greedy algorithms by utilizing the orthogonal benefits of two log-determinant approximation schemes: (a) first-order expansions to the matrix log-determinant function and (b) high-order expansions to the scalar log function with stochastic trace estimators. 
In our experiments, our algorithms are orders of magnitude faster than their competitors, while sacrificing marginal accuracy.}\n}", "pdf": "http://proceedings.mlr.press/v70/han17a/han17a.pdf", "supp": "", "pdf_size": 832633, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18100214437380555749&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "School of Electrical Engineering, Korea Advanced Institute of Science and Technology (KAIST), Daejeon, Republic of Korea; Bloomberg LP, 731 Lexington Avenue, New York, NY, 10069; School of Electrical Engineering, Korea Advanced Institute of Science and Technology (KAIST), Daejeon, Republic of Korea; School of Electrical Engineering, Korea Advanced Institute of Science and Technology (KAIST), Daejeon, Republic of Korea", "aff_domain": "kaist.ac.kr;bloomberg.net;kaist.ac.kr;kaist.ac.kr", "email": "kaist.ac.kr;bloomberg.net;kaist.ac.kr;kaist.ac.kr", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/han17a.html", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "Korea Advanced Institute of Science and Technology;Bloomberg LP", "aff_unique_dep": "School of Electrical Engineering;", "aff_unique_url": "https://www.kaist.ac.kr;https://www.bloomberg.com", "aff_unique_abbr": "KAIST;Bloomberg", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Daejeon;", "aff_country_unique_index": "0;1;0;0", "aff_country_unique": "South Korea;United States" }, { "title": "Faster Principal Component Regression and Stable Matrix Chebyshev Approximation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/493", "id": "493", "author_site": "Zeyuan Allen-Zhu, Yuanzhi Li", "author": "Zeyuan Allen-Zhu; Yuanzhi Li", "abstract": "We solve principal component regression (PCR), up to a multiplicative accuracy $1+\\gamma$, by reducing the problem to $\\tilde{O}(\\gamma^{-1})$ black-box calls of ridge regression. Therefore, our algorithm does not require any explicit construction of the top principal components, and is suitable for large-scale PCR instances. In contrast, the previous result requires $\\tilde{O}(\\gamma^{-2})$ such black-box calls. We obtain this result by developing a general stable recurrence formula for matrix Chebyshev polynomials, and a degree-optimal polynomial approximation to the matrix sign function. Our techniques may be of independent interest, especially when designing iterative methods.", "bibtex": "@InProceedings{pmlr-v70-allen-zhu17c,\n title = \t {Faster Principal Component Regression and Stable Matrix {C}hebyshev Approximation},\n author = {Zeyuan Allen-Zhu and Yuanzhi Li},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {107--115},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/allen-zhu17c/allen-zhu17c.pdf},\n url = \t {https://proceedings.mlr.press/v70/allen-zhu17c.html},\n abstract = \t {We solve principal component regression (PCR), up to a multiplicative accuracy $1+\\gamma$, by reducing the problem to $\\tilde{O}(\\gamma^{-1})$ black-box calls of ridge regression. Therefore, our algorithm does not require any explicit construction of the top principal components, and is suitable for large-scale PCR instances.
In contrast, the previous result requires $\tilde{O}(\gamma^{-2})$ such black-box calls. We obtain this result by developing a general stable recurrence formula for matrix Chebyshev polynomials, and a degree-optimal polynomial approximation to the matrix sign function. Our techniques may be of independent interest, especially when designing iterative methods.}\n}", "pdf": "http://proceedings.mlr.press/v70/allen-zhu17c/allen-zhu17c.pdf", "supp": "", "pdf_size": 502688, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9999796132565281530&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Microsoft Research; Princeton University", "aff_domain": "csail.mit.edu;cs.princeton.edu", "email": "csail.mit.edu;cs.princeton.edu", "github": "", "project": "https://arxiv.org/abs/1608.04773", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/allen-zhu17c.html", "aff_unique_index": "0;1", "aff_unique_norm": "Microsoft;Princeton University", "aff_unique_dep": "Microsoft Research;", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://www.princeton.edu", "aff_unique_abbr": "MSR;Princeton", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "FeUdal Networks for Hierarchical Reinforcement Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/542", "id": "542", "author_site": "Alexander Vezhnevets, Simon Osindero, Tom Schaul, Nicolas Heess, Max Jaderberg, David Silver, Koray Kavukcuoglu", "author": "Alexander Sasha Vezhnevets; Simon Osindero; Tom Schaul; Nicolas Heess; Max Jaderberg; David Silver; Koray Kavukcuoglu", "abstract": "We introduce FeUdal Networks (FuNs): a novel architecture for hierarchical reinforcement learning. Our approach is inspired by the feudal reinforcement learning proposal of Dayan and Hinton, and gains power and efficacy by decoupling end-to-end learning across multiple levels \u2013 allowing it to utilise different resolutions of time. Our framework employs a Manager module and a Worker module. The Manager operates at a slower time scale and sets abstract goals which are conveyed to and enacted by the Worker. The Worker generates primitive actions at every tick of the environment. The decoupled structure of FuN conveys several benefits \u2013 in addition to facilitating very long timescale credit assignment it also encourages the emergence of sub-policies associated with different goals set by the Manager. These properties allow FuN to dramatically outperform a strong baseline agent on tasks that involve long-term credit assignment or memorisation.", "bibtex": "@InProceedings{pmlr-v70-vezhnevets17a,\n title = \t {{F}e{U}dal Networks for Hierarchical Reinforcement Learning},\n author = {Alexander Sasha Vezhnevets and Simon Osindero and Tom Schaul and Nicolas Heess and Max Jaderberg and David Silver and Koray Kavukcuoglu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3540--3549},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/vezhnevets17a/vezhnevets17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/vezhnevets17a.html},\n abstract = \t {We introduce FeUdal Networks (FuNs): a novel architecture for hierarchical reinforcement learning.
Our approach is inspired by the feudal reinforcement learning proposal of Dayan and Hinton, and gains power and efficacy by decoupling end-to-end learning across multiple levels \u2013 allowing it to utilise different resolutions of time. Our framework employs a Manager module and a Worker module. The Manager operates at a slower time scale and sets abstract goals which are conveyed to and enacted by the Worker. The Worker generates primitive actions at every tick of the environment. The decoupled structure of FuN conveys several benefits \u2013 in addition to facilitating very long timescale credit assignment it also encourages the emergence of sub-policies associated with different goals set by the Manager. These properties allow FuN to dramatically outperform a strong baseline agent on tasks that involve long-term credit assignment or memorisation.}\n}", "pdf": "http://proceedings.mlr.press/v70/vezhnevets17a/vezhnevets17a.pdf", "supp": "", "pdf_size": 1296351, "gs_citation": 1192, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2074247135017163310&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom", "aff_domain": "google.com; ; ; ; ; ; ", "email": "google.com; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v70/vezhnevets17a.html", "aff_unique_index": "0;0;0;0;0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "0;0;0;0;0;0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Follow the Compressed Leader: Faster Online Learning of Eigenvectors and Faster MMWU", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/492", "id": "492", "author_site": "Zeyuan Allen-Zhu, Yuanzhi Li", "author": "Zeyuan Allen-Zhu; Yuanzhi Li", "abstract": "The online problem of computing the top eigenvector is fundamental to machine learning. The famous matrix-multiplicative-weight-update (MMWU) framework solves this online problem and gives optimal regret. However, since MMWU runs very slow due to the computation of matrix exponentials, researchers proposed the follow-the-perturbed-leader (FTPL) framework which is faster, but a factor $\\sqrt{d}$ worse than the optimal regret for dimension-$d$ matrices. We propose a", "bibtex": "@InProceedings{pmlr-v70-allen-zhu17d,\n title = \t {Follow the Compressed Leader: Faster Online Learning of Eigenvectors and Faster {MMWU}},\n author = {Zeyuan Allen-Zhu and Yuanzhi Li},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {116--125},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/allen-zhu17d/allen-zhu17d.pdf},\n url = \t {https://proceedings.mlr.press/v70/allen-zhu17d.html},\n abstract = \t {The online problem of computing the top eigenvector is fundamental to machine learning. The famous matrix-multiplicative-weight-update (MMWU) framework solves this online problem and gives optimal regret. 
However, since MMWU runs very slow due to the computation of matrix exponentials, researchers proposed the follow-the-perturbed-leader (FTPL) framework which is faster, but a factor $\\sqrt{d}$ worse than the optimal regret for dimension-$d$ matrices. We propose a", "pdf": "http://proceedings.mlr.press/v70/allen-zhu17d/allen-zhu17d.pdf", "supp": "", "pdf_size": 969577, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10790060487435879447&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Microsoft Research; Princeton University", "aff_domain": "csail.mit.edu;cs.princeton.edu", "email": "csail.mit.edu;cs.princeton.edu", "github": "", "project": "https://arxiv.org/abs/1701.01722", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/allen-zhu17d.html", "aff_unique_index": "0;1", "aff_unique_norm": "Microsoft;Princeton University", "aff_unique_dep": "Microsoft Research;", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://www.princeton.edu", "aff_unique_abbr": "MSR;Princeton", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Follow the Moving Leader in Deep Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/810", "id": "810", "author_site": "Shuai Zheng, James Kwok", "author": "Shuai Zheng; James T. Kwok", "abstract": "Deep networks are highly nonlinear and difficult to optimize. During training, the parameter iterate may move from one local basin to another, or the data distribution may even change. Inspired by the close connection between stochastic optimization and online learning, we propose a variant of the", "bibtex": "@InProceedings{pmlr-v70-zheng17a,\n title = \t {Follow the Moving Leader in Deep Learning},\n author = {Shuai Zheng and James T. Kwok},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {4110--4119},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zheng17a/zheng17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/zheng17a.html},\n abstract = \t {Deep networks are highly nonlinear and difficult to optimize. During training, the parameter iterate may move from one local basin to another, or the data distribution may even change. 
Inspired by the close connection between stochastic optimization and online learning, we propose a variant of the", "pdf": "http://proceedings.mlr.press/v70/zheng17a/zheng17a.pdf", "supp": "", "pdf_size": 1048233, "gs_citation": 32, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13929838044041570845&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Department of Computer Science and Engineering, Hong Kong University of Science and Technology, Clear Water Bay, Hong Kong; Department of Computer Science and Engineering, Hong Kong University of Science and Technology, Clear Water Bay, Hong Kong", "aff_domain": "cse.ust.hk;cse.ust.hk", "email": "cse.ust.hk;cse.ust.hk", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/zheng17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Hong Kong University of Science and Technology", "aff_unique_dep": "Department of Computer Science and Engineering", "aff_unique_url": "https://www.ust.hk", "aff_unique_abbr": "HKUST", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Hong Kong SAR", "aff_country_unique_index": "0;0", "aff_country_unique": "China" }, { "title": "Forest-type Regression with General Losses and Robust Forest", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/871", "id": "871", "author_site": "Hanbo Li, Andrew Martin", "author": "Alexander Hanbo Li; Andrew Martin", "abstract": "This paper introduces a new general framework for forest-type regression which allows the development of robust forest regressors by selecting from a large family of robust loss functions. In particular, when plugged in the squared error and quantile losses, it will recover the classical random forest and quantile random forest. We then use robust loss functions to develop more robust forest-type regression algorithms. In the experiments, we show by simulation and real data that our robust forests are indeed much more insensitive to outliers, and choosing the right number of nearest neighbors can quickly improve the generalization performance of random forest.", "bibtex": "@InProceedings{pmlr-v70-li17e,\n title = \t {Forest-type Regression with General Losses and Robust Forest},\n author = {Alexander Hanbo Li and Andrew Martin},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2091--2100},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/li17e/li17e.pdf},\n url = \t {https://proceedings.mlr.press/v70/li17e.html},\n abstract = \t {This paper introduces a new general framework for forest-type regression which allows the development of robust forest regressors by selecting from a large family of robust loss functions. In particular, when plugged in the squared error and quantile losses, it will recover the classical random forest and quantile random forest. We then use robust loss functions to develop more robust forest-type regression algorithms. 
In the experiments, we show by simulation and real data that our robust forests are indeed much more insensitive to outliers, and choosing the right number of nearest neighbors can quickly improve the generalization performance of random forest.}\n}", "pdf": "http://proceedings.mlr.press/v70/li17e/li17e.pdf", "supp": "", "pdf_size": 563358, "gs_citation": 28, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12514990851042210132&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "University of California at San Diego; Zillow", "aff_domain": "gmail.com; ", "email": "gmail.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/li17e.html", "aff_unique_index": "0;1", "aff_unique_norm": "University of California, San Diego;Zillow", "aff_unique_dep": ";", "aff_unique_url": "https://ucsd.edu;https://www.zillow.com", "aff_unique_abbr": "UCSD;Zillow", "aff_campus_unique_index": "0", "aff_campus_unique": "San Diego;", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Forward and Reverse Gradient-Based Hyperparameter Optimization", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/704", "id": "704", "author_site": "Luca Franceschi, Michele Donini, Paolo Frasconi, Massimiliano Pontil", "author": "Luca Franceschi; Michele Donini; Paolo Frasconi; Massimiliano Pontil", "abstract": "We study two procedures (reverse-mode and forward-mode) for computing the gradient of the validation error with respect to the hyperparameters of any iterative learning algorithm such as stochastic gradient descent. These procedures mirror two ways of computing gradients for recurrent neural networks and have different trade-offs in terms of running time and space requirements. Our formulation of the reverse-mode procedure is linked to previous work by Maclaurin et al (2015) but does not require reversible dynamics. Additionally, we explore the use of constraints on the hyperparameters. The forward-mode procedure is suitable for real-time hyperparameter updates, which may significantly speedup hyperparameter optimization on large datasets. We present a series of experiments on image and phone classification tasks. In the second task, previous gradient-based approaches are prohibitive. We show that our real-time algorithm yields state-of-the-art results in affordable time.", "bibtex": "@InProceedings{pmlr-v70-franceschi17a,\n title = \t {Forward and Reverse Gradient-Based Hyperparameter Optimization},\n author = {Luca Franceschi and Michele Donini and Paolo Frasconi and Massimiliano Pontil},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1165--1173},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/franceschi17a/franceschi17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/franceschi17a.html},\n abstract = \t {We study two procedures (reverse-mode and forward-mode) for computing the gradient of the validation error with respect to the hyperparameters of any iterative learning algorithm such as stochastic gradient descent. These procedures mirror two ways of computing gradients for recurrent neural networks and have different trade-offs in terms of running time and space requirements. 
Our formulation of the reverse-mode procedure is linked to previous work by Maclaurin et al (2015) but does not require reversible dynamics. Additionally, we explore the use of constraints on the hyperparameters. The forward-mode procedure is suitable for real-time hyperparameter updates, which may significantly speedup hyperparameter optimization on large datasets. We present a series of experiments on image and phone classification tasks. In the second task, previous gradient-based approaches are prohibitive. We show that our real-time algorithm yields state-of-the-art results in affordable time.}\n}", "pdf": "http://proceedings.mlr.press/v70/franceschi17a/franceschi17a.pdf", "supp": "", "pdf_size": 414581, "gs_citation": 569, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5902468308747277761&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Computational Statistics and Machine Learning, Istituto Italiano di Tecnologia, Genoa, Italy+Department of Computer Science, University College London, UK; Computational Statistics and Machine Learning, Istituto Italiano di Tecnologia, Genoa, Italy; Department of Information Engineering, Universit\u00e0 degli Studi di Firenze, Italy; Computational Statistics and Machine Learning, Istituto Italiano di Tecnologia, Genoa, Italy+Department of Computer Science, University College London, UK", "aff_domain": "iit.it; ; ; ", "email": "iit.it; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/franceschi17a.html", "aff_unique_index": "0+1;0;2;0+1", "aff_unique_norm": "Istituto Italiano di Tecnologia;University College London;Universit\u00e0 degli Studi di Firenze", "aff_unique_dep": "Computational Statistics and Machine Learning;Department of Computer Science;Department of Information Engineering", "aff_unique_url": "https://www.iit.it;https://www.ucl.ac.uk;https://www.unifi.it", "aff_unique_abbr": "IIT;UCL;", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Genoa;", "aff_country_unique_index": "0+1;0;0;0+1", "aff_country_unique": "Italy;United Kingdom" }, { "title": "Fractional Langevin Monte Carlo: Exploring Levy Driven Stochastic Differential Equations for Markov Chain Monte Carlo", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/462", "id": "462", "author_site": "Umut Simsekli", "author": "Umut \u015eim\u015fekli", "abstract": "Along with the recent advances in scalable Markov Chain Monte Carlo methods, sampling techniques that are based on Langevin diffusions have started receiving increasing attention. These so called Langevin Monte Carlo (LMC) methods are based on diffusions driven by a Brownian motion, which gives rise to Gaussian proposal distributions in the resulting algorithms. Even though these approaches have proven successful in many applications, their performance can be limited by the light-tailed nature of the Gaussian proposals. In this study, we extend classical LMC and develop a novel Fractional LMC (FLMC) framework that is based on a family of heavy-tailed distributions, called alpha-stable Levy distributions. As opposed to classical approaches, the proposed approach can possess large jumps while targeting the correct distribution, which would be beneficial for efficient exploration of the state space. We develop novel computational methods that can scale up to large-scale problems and we provide formal convergence analysis of the proposed scheme. 
Our experiments support our theory: FLMC can provide superior performance in multi-modal settings, improved convergence rates, and robustness to algorithm parameters.", "bibtex": "@InProceedings{pmlr-v70-simsekli17a,\n title = \t {Fractional {L}angevin {M}onte Carlo: Exploring {L}evy Driven Stochastic Differential Equations for {M}arkov Chain {M}onte {C}arlo},\n author = {Umut {\\c{S}}im{\\c{s}}ekli},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3200--3209},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/simsekli17a/simsekli17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/simsekli17a.html},\n abstract = \t {Along with the recent advances in scalable Markov Chain Monte Carlo methods, sampling techniques that are based on Langevin diffusions have started receiving increasing attention. These so called Langevin Monte Carlo (LMC) methods are based on diffusions driven by a Brownian motion, which gives rise to Gaussian proposal distributions in the resulting algorithms. Even though these approaches have proven successful in many applications, their performance can be limited by the light-tailed nature of the Gaussian proposals. In this study, we extend classical LMC and develop a novel Fractional LMC (FLMC) framework that is based on a family of heavy-tailed distributions, called alpha-stable Levy distributions. As opposed to classical approaches, the proposed approach can possess large jumps while targeting the correct distribution, which would be beneficial for efficient exploration of the state space. We develop novel computational methods that can scale up to large-scale problems and we provide formal convergence analysis of the proposed scheme. Our experiments support our theory: FLMC can provide superior performance in multi-modal settings, improved convergence rates, and robustness to algorithm parameters.}\n}", "pdf": "http://proceedings.mlr.press/v70/simsekli17a/simsekli17a.pdf", "supp": "", "pdf_size": 535685, "gs_citation": 61, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5437138470560094005&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "LTCI, T\u00e9l\u00e9com ParisTech, Universit\u00e9 Paris-Saclay, 75013, Paris, France", "aff_domain": "telecom-paristech.fr", "email": "telecom-paristech.fr", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v70/simsekli17a.html", "aff_unique_index": "0", "aff_unique_norm": "T\u00e9l\u00e9com ParisTech", "aff_unique_dep": "LTCI", "aff_unique_url": "https://www.telecom-paris.fr", "aff_unique_abbr": "T\u00e9l\u00e9com ParisTech", "aff_campus_unique_index": "0", "aff_campus_unique": "Paris", "aff_country_unique_index": "0", "aff_country_unique": "France" }, { "title": "Frame-based Data Factorizations", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/722", "id": "722", "author_site": "Sebastian Mair, Ahc\u00e8ne Boubekki, Ulf Brefeld", "author": "Sebastian Mair; Ahc\u00e8ne Boubekki; Ulf Brefeld", "abstract": "Archetypal Analysis is the method of choice to compute interpretable matrix factorizations. Every data point is represented as a convex combination of factors, i.e., points on the boundary of the convex hull of the data. This renders computation inefficient.
In this paper, we show that the set of vertices of a convex hull, the so-called frame, can be efficiently computed by a quadratic program. We provide theoretical and empirical results for our proposed approach and make use of the frame to accelerate Archetypal Analysis. The novel method yields similar reconstruction errors as baseline competitors but is much faster to compute.", "bibtex": "@InProceedings{pmlr-v70-mair17a,\n title = \t {Frame-based Data Factorizations},\n author = {Sebastian Mair and Ahc{\\`e}ne Boubekki and Ulf Brefeld},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2305--2313},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/mair17a/mair17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/mair17a.html},\n abstract = \t {Archetypal Analysis is the method of choice to compute interpretable matrix factorizations. Every data point is represented as a convex combination of factors, i.e., points on the boundary of the convex hull of the data. This renders computation inefficient. In this paper, we show that the set of vertices of a convex hull, the so-called frame, can be efficiently computed by a quadratic program. We provide theoretical and empirical results for our proposed approach and make use of the frame to accelerate Archetypal Analysis. The novel method yields similar reconstruction errors as baseline competitors but is much faster to compute.}\n}", "pdf": "http://proceedings.mlr.press/v70/mair17a/mair17a.pdf", "supp": "", "pdf_size": 1203076, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1493531093736043122&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Leuphana University, L\u00fcneburg, Germany+German Institute for Educational Research, Frankfurt am Main, Germany; Leuphana University, L\u00fcneburg, Germany; Leuphana University, L\u00fcneburg, Germany", "aff_domain": "leuphana.de; ; ", "email": "leuphana.de; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/mair17a.html", "aff_unique_index": "0+1;0;0", "aff_unique_norm": "Leuphana University;German Institute for Educational Research", "aff_unique_dep": ";", "aff_unique_url": "https://www.leuphana.de;", "aff_unique_abbr": "Leuphana;", "aff_campus_unique_index": "0+1;0;0", "aff_campus_unique": "L\u00fcneburg;Frankfurt am Main", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "Germany" }, { "title": "From Patches to Images: A Nonparametric Generative Model", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/753", "id": "753", "author_site": "Geng Ji, Michael Hughes, Erik Sudderth", "author": "Geng Ji; Michael C. Hughes; Erik B. Sudderth", "abstract": "We propose a hierarchical generative model that captures the self-similar structure of image regions as well as how this structure is shared across image collections. Our model is based on a novel, variational interpretation of the popular expected patch log-likelihood (EPLL) method as a model for randomly positioned grids of image patches. While previous EPLL methods modeled image patches with finite Gaussian mixtures, we use nonparametric Dirichlet process (DP) mixtures to create models whose complexity grows as additional images are observed. 
An extension based on the hierarchical DP then captures repetitive and self-similar structure via image-specific variations in cluster frequencies. We derive a structured variational inference algorithm that adaptively creates new patch clusters to more accurately model novel image textures. Our denoising performance on standard benchmarks is superior to EPLL and comparable to the state-of-the-art, and provides novel statistical justifications for common image processing heuristics. We also show accurate image inpainting results.", "bibtex": "@InProceedings{pmlr-v70-ji17a,\n title = \t {From Patches to Images: A Nonparametric Generative Model},\n author = {Geng Ji and Michael C. Hughes and Erik B. Sudderth},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1675--1683},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/ji17a/ji17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/ji17a.html},\n abstract = \t {We propose a hierarchical generative model that captures the self-similar structure of image regions as well as how this structure is shared across image collections. Our model is based on a novel, variational interpretation of the popular expected patch log-likelihood (EPLL) method as a model for randomly positioned grids of image patches. While previous EPLL methods modeled image patches with finite Gaussian mixtures, we use nonparametric Dirichlet process (DP) mixtures to create models whose complexity grows as additional images are observed. An extension based on the hierarchical DP then captures repetitive and self-similar structure via image-specific variations in cluster frequencies. We derive a structured variational inference algorithm that adaptively creates new patch clusters to more accurately model novel image textures. Our denoising performance on standard benchmarks is superior to EPLL and comparable to the state-of-the-art, and provides novel statistical justifications for common image processing heuristics. 
We also show accurate image inpainting results.}\n}", "pdf": "http://proceedings.mlr.press/v70/ji17a/ji17a.pdf", "supp": "", "pdf_size": 3996935, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6500973434081031988&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Brown University, Providence, RI, USA; Harvard University, Cambridge, MA, USA; University of California, Irvine, CA, USA", "aff_domain": "cs.brown.edu; ; ", "email": "cs.brown.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/ji17a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Brown University;Harvard University;University of California, Irvine", "aff_unique_dep": ";;", "aff_unique_url": "https://www.brown.edu;https://www.harvard.edu;https://www.uci.edu", "aff_unique_abbr": "Brown;Harvard;UCI", "aff_campus_unique_index": "0;1;2", "aff_campus_unique": "Providence;Cambridge;Irvine", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "GSOS: Gauss-Seidel Operator Splitting Algorithm for Multi-Term Nonsmooth Convex Composite Optimization", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/477", "id": "477", "author_site": "Li Shen, Wei Liu, Ganzhao Yuan, Shiqian Ma", "author": "Li Shen; Wei Liu; Ganzhao Yuan; Shiqian Ma", "abstract": "In this paper, we propose a fast Gauss-Seidel Operator Splitting (GSOS) algorithm for addressing multi-term nonsmooth convex composite optimization, which has wide applications in machine learning, signal processing and statistics. The proposed GSOS algorithm inherits the advantage of the Gauss-Seidel technique to accelerate the optimization procedure, and leverages the operator splitting technique to reduce the computational complexity. In addition, we develop a new technique to establish the global convergence of the GSOS algorithm. To be specific, we first reformulate the iterations of GSOS as a two-step iterations algorithm by employing the tool of operator optimization theory. Subsequently, we establish the convergence of GSOS based on the two-step iterations algorithm reformulation. At last, we apply the proposed GSOS algorithm to solve overlapping group Lasso and graph-guided fused Lasso problems. Numerical experiments show that our proposed GSOS algorithm is superior to the state-of-the-art algorithms in terms of both efficiency and effectiveness.", "bibtex": "@InProceedings{pmlr-v70-shen17b,\n title = \t {{GSOS}: {G}auss-{S}eidel Operator Splitting Algorithm for Multi-Term Nonsmooth Convex Composite Optimization},\n author = {Li Shen and Wei Liu and Ganzhao Yuan and Shiqian Ma},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3125--3134},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/shen17b/shen17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/shen17b.html},\n abstract = \t {In this paper, we propose a fast Gauss-Seidel Operator Splitting (GSOS) algorithm for addressing multi-term nonsmooth convex composite optimization, which has wide applications in machine learning, signal processing and statistics. 
The proposed GSOS algorithm inherits the advantage of the Gauss-Seidel technique to accelerate the optimization procedure, and leverages the operator splitting technique to reduce the computational complexity. In addition, we develop a new technique to establish the global convergence of the GSOS algorithm. To be specific, we first reformulate the iterations of GSOS as a two-step iterations algorithm by employing the tool of operator optimization theory. Subsequently, we establish the convergence of GSOS based on the two-step iterations algorithm reformulation. At last, we apply the proposed GSOS algorithm to solve overlapping group Lasso and graph-guided fused Lasso problems. Numerical experiments show that our proposed GSOS algorithm is superior to the state-of-the-art algorithms in terms of both efficiency and effectiveness.}\n}", "pdf": "http://proceedings.mlr.press/v70/shen17b/shen17b.pdf", "supp": "", "pdf_size": 597680, "gs_citation": 7, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9407276439589768181&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Tencent AI Lab, China; Tencent AI Lab, China; Sun Yat-sen University, China; The Chinese University of Hong Kong, China", "aff_domain": "gmail.com;ee.columbia.edu; ; ", "email": "gmail.com;ee.columbia.edu; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/shen17b.html", "aff_unique_index": "0;0;1;2", "aff_unique_norm": "Tencent;Sun Yat-sen University;Chinese University of Hong Kong", "aff_unique_dep": "Tencent AI Lab;;", "aff_unique_url": "https://ai.tencent.com;http://www.sysu.edu.cn;https://www.cuhk.edu.hk", "aff_unique_abbr": "Tencent AI Lab;SYSU;CUHK", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "China" }, { "title": "Generalization and Equilibrium in Generative Adversarial Nets (GANs)", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/854", "id": "854", "author_site": "Sanjeev Arora, Rong Ge, Yingyu Liang, Tengyu Ma, Yi Zhang", "author": "Sanjeev Arora; Rong Ge; Yingyu Liang; Tengyu Ma; Yi Zhang", "abstract": "It is shown that training of generative adversarial network (GAN) may not have good generalization properties; e.g., training may appear successful but the trained distribution may be far from target distribution in standard metrics. However, generalization does occur for a weaker metric called neural net distance. It is also shown that an approximate pure equilibrium exists in the discriminator/generator game for a natural training objective (Wasserstein) when generator capacity and training set sizes are moderate. 
This existence of equilibrium inspires MIX+GAN protocol, which can be combined with any existing GAN training, and empirically shown to improve some of them.", "bibtex": "@InProceedings{pmlr-v70-arora17a,\n title = \t {Generalization and Equilibrium in Generative Adversarial Nets ({GAN}s)},\n author = {Sanjeev Arora and Rong Ge and Yingyu Liang and Tengyu Ma and Yi Zhang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {224--232},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/arora17a/arora17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/arora17a.html},\n abstract = \t {It is shown that training of generative adversarial network (GAN) may not have good generalization properties; e.g., training may appear successful but the trained distribution may be far from target distribution in standard metrics. However, generalization does occur for a weaker metric called neural net distance. It is also shown that an approximate pure equilibrium exists in the discriminator/generator game for a natural training objective (Wasserstein) when generator capacity and training set sizes are moderate. This existence of equilibrium inspires MIX+GAN protocol, which can be combined with any existing GAN training, and empirically shown to improve some of them.}\n}", "pdf": "http://proceedings.mlr.press/v70/arora17a/arora17a.pdf", "supp": "", "pdf_size": 1822356, "gs_citation": 867, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11124082639758751800&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Princeton University; Duke University; Princeton University; Princeton University; Princeton University", "aff_domain": "cs.duke.edu;cs.princeton.edu; ; ; ", "email": "cs.duke.edu;cs.princeton.edu; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/arora17a.html", "aff_unique_index": "0;1;0;0;0", "aff_unique_norm": "Princeton University;Duke University", "aff_unique_dep": ";", "aff_unique_url": "https://www.princeton.edu;https://www.duke.edu", "aff_unique_abbr": "Princeton;Duke", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Geometry of Neural Network Loss Surfaces via Random Matrix Theory", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/655", "id": "655", "author_site": "Jeffrey Pennington, Yasaman Bahri", "author": "Jeffrey Pennington; Yasaman Bahri", "abstract": "Understanding the geometry of neural network loss surfaces is important for the development of improved optimization algorithms and for building a theoretical understanding of why deep learning works. In this paper, we study the geometry in terms of the distribution of eigenvalues of the Hessian matrix at critical points of varying energy. We introduce an analytical framework and a set of tools from random matrix theory that allow us to compute an approximation of this distribution under a set of simplifying assumptions. The shape of the spectrum depends strongly on the energy and another key parameter, $\\phi$, which measures the ratio of parameters to data points. 
Our analysis predicts and numerical simulations support that for critical points of small index, the number of negative eigenvalues scales like the 3/2 power of the energy. We leave as an open problem an explanation for our observation that, in the context of a certain memorization task, the energy of minimizers is well-approximated by the function $1/2(1-\\phi)^2$.", "bibtex": "@InProceedings{pmlr-v70-pennington17a,\n title = \t {Geometry of Neural Network Loss Surfaces via Random Matrix Theory},\n author = {Jeffrey Pennington and Yasaman Bahri},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2798--2806},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/pennington17a/pennington17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/pennington17a.html},\n abstract = \t {Understanding the geometry of neural network loss surfaces is important for the development of improved optimization algorithms and for building a theoretical understanding of why deep learning works. In this paper, we study the geometry in terms of the distribution of eigenvalues of the Hessian matrix at critical points of varying energy. We introduce an analytical framework and a set of tools from random matrix theory that allow us to compute an approximation of this distribution under a set of simplifying assumptions. The shape of the spectrum depends strongly on the energy and another key parameter, $\\phi$, which measures the ratio of parameters to data points. Our analysis predicts and numerical simulations support that for critical points of small index, the number of negative eigenvalues scales like the 3/2 power of the energy. We leave as an open problem an explanation for our observation that, in the context of a certain memorization task, the energy of minimizers is well-approximated by the function $1/2(1-\\phi)^2$.}\n}", "pdf": "http://proceedings.mlr.press/v70/pennington17a/pennington17a.pdf", "supp": "", "pdf_size": 1769157, "gs_citation": 178, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14646076828812294654&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Google Brain; Google Brain", "aff_domain": "google.com; ", "email": "google.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/pennington17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Brain", "aff_unique_url": "https://brain.google.com", "aff_unique_abbr": "Google Brain", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Global optimization of Lipschitz functions", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/717", "id": "717", "author_site": "Cedric Malherbe, Nicolas Vayatis", "author": "C\u00e9dric Malherbe; Nicolas Vayatis", "abstract": "The goal of the paper is to design sequential strategies which lead to efficient optimization of an unknown function under the only assumption that it has a finite Lipschitz constant. We first identify sufficient conditions for the consistency of generic sequential algorithms and formulate the expected minimax rate for their performance. 
We introduce and analyze a first algorithm called LIPO which assumes the Lipschitz constant to be known. Consistency, minimax rates for LIPO are proved, as well as fast rates under an additional H\u00f6lder like condition. An adaptive version of LIPO is also introduced for the more realistic setup where Lipschitz constant is unknown and has to be estimated along with the optimization. Similar theoretical guarantees are shown to hold for the adaptive LIPO algorithm and a numerical assessment is provided at the end of the paper to illustrate the potential of this strategy with respect to state-of-the-art methods over typical benchmark problems for global optimization.", "bibtex": "@InProceedings{pmlr-v70-malherbe17a,\n title = \t {Global optimization of {L}ipschitz functions},\n author = {C{\\'e}dric Malherbe and Nicolas Vayatis},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2314--2323},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/malherbe17a/malherbe17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/malherbe17a.html},\n abstract = \t {The goal of the paper is to design sequential strategies which lead to efficient optimization of an unknown function under the only assumption that it has a finite Lipschitz constant. We first identify sufficient conditions for the consistency of generic sequential algorithms and formulate the expected minimax rate for their performance. We introduce and analyze a first algorithm called LIPO which assumes the Lipschitz constant to be known. Consistency, minimax rates for LIPO are proved, as well as fast rates under an additional H\u00f6lder like condition. An adaptive version of LIPO is also introduced for the more realistic setup where Lipschitz constant is unknown and has to be estimated along with the optimization. 
Similar theoretical guarantees are shown to hold for the adaptive LIPO algorithm and a numerical assessment is provided at the end of the paper to illustrate the potential of this strategy with respect to state-of-the-art methods over typical benchmark problems for global optimization.}\n}", "pdf": "http://proceedings.mlr.press/v70/malherbe17a/malherbe17a.pdf", "supp": "", "pdf_size": 493031, "gs_citation": 153, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15083760441503974987&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "CMLA, ENS Cachan, CNRS, Universit\u00e9 Paris-Saclay, 94235, Cachan, France; CMLA, ENS Cachan, CNRS, Universit\u00e9 Paris-Saclay, 94235, Cachan, France", "aff_domain": "cmla.ens-cachan.fr;cmla.ens-cachan.fr", "email": "cmla.ens-cachan.fr;cmla.ens-cachan.fr", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/malherbe17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "\u00c9cole Normale Sup\u00e9rieure de Cachan", "aff_unique_dep": "CMLA", "aff_unique_url": "https://www.ens-cachan.fr", "aff_unique_abbr": "ENS Cachan", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Cachan", "aff_country_unique_index": "0;0", "aff_country_unique": "France" }, { "title": "Globally Induced Forest: A Prepruning Compression Scheme", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/802", "id": "802", "author_site": "Jean-Michel Begon, Arnaud Joly, Pierre Geurts", "author": "Jean-Michel Begon; Arnaud Joly; Pierre Geurts", "abstract": "Tree-based ensemble models are heavy memory-wise. An undesired state of affairs considering nowadays datasets, memory-constrained environment and fitting/prediction times. In this paper, we propose the Globally Induced Forest (GIF) to remedy this problem. GIF is a fast prepruning approach to build lightweight ensembles by iteratively deepening the current forest. It mixes local and global optimizations to produce accurate predictions under memory constraints in reasonable time. We show that the proposed method is more than competitive with standard tree-based ensembles under corresponding constraints, and can sometimes even surpass much larger models.", "bibtex": "@InProceedings{pmlr-v70-begon17a,\n title = \t {Globally Induced Forest: A Prepruning Compression Scheme},\n author = {Jean-Michel Begon and Arnaud Joly and Pierre Geurts},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {420--428},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/begon17a/begon17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/begon17a.html},\n abstract = \t {Tree-based ensemble models are heavy memory-wise. An undesired state of affairs considering nowadays datasets, memory-constrained environment and fitting/prediction times. In this paper, we propose the Globally Induced Forest (GIF) to remedy this problem. GIF is a fast prepruning approach to build lightweight ensembles by iteratively deepening the current forest. It mixes local and global optimizations to produce accurate predictions under memory constraints in reasonable time.
We show that the proposed method is more than competitive with standard tree-based ensembles under corresponding constraints, and can sometimes even surpass much larger models.}\n}", "pdf": "http://proceedings.mlr.press/v70/begon17a/begon17a.pdf", "supp": "", "pdf_size": 583033, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3768379930317006077&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Electrical Engineering and Computer Science University of Li\u00e8ge, Li\u00e8ge, Belgium; Department of Electrical Engineering and Computer Science University of Li\u00e8ge, Li\u00e8ge, Belgium; Department of Electrical Engineering and Computer Science University of Li\u00e8ge, Li\u00e8ge, Belgium", "aff_domain": "ulg.ac.be; ;ulg.ac.be", "email": "ulg.ac.be; ;ulg.ac.be", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/begon17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Li\u00e8ge", "aff_unique_dep": "Department of Electrical Engineering and Computer Science", "aff_unique_url": "https://www.ulg.ac.be", "aff_unique_abbr": "ULi\u00e8ge", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Li\u00e8ge", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Belgium" }, { "title": "Globally Optimal Gradient Descent for a ConvNet with Gaussian Inputs", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/571", "id": "571", "author_site": "Alon Brutzkus, Amir Globerson", "author": "Alon Brutzkus; Amir Globerson", "abstract": "Deep learning models are often successfully trained using gradient descent, despite the worst case hardness of the underlying non-convex optimization problem. The key question is then under what conditions can one prove that optimization will succeed. Here we provide a strong result of this kind. We consider a neural net with one hidden layer and a convolutional structure with no overlap and a ReLU activation function. For this architecture we show that learning is NP-complete in the general case, but that when the input distribution is Gaussian, gradient descent converges to the global optimum in polynomial time. To the best of our knowledge, this is the first global optimality guarantee of gradient descent on a convolutional neural network with ReLU activations.", "bibtex": "@InProceedings{pmlr-v70-brutzkus17a,\n title = \t {Globally Optimal Gradient Descent for a {C}onv{N}et with {G}aussian Inputs},\n author = {Alon Brutzkus and Amir Globerson},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {605--614},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/brutzkus17a/brutzkus17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/brutzkus17a.html},\n abstract = \t {Deep learning models are often successfully trained using gradient descent, despite the worst case hardness of the underlying non-convex optimization problem. The key question is then under what conditions can one prove that optimization will succeed. Here we provide a strong result of this kind. We consider a neural net with one hidden layer and a convolutional structure with no overlap and a ReLU activation function. 
For this architecture we show that learning is NP-complete in the general case, but that when the input distribution is Gaussian, gradient descent converges to the global optimum in polynomial time. To the best of our knowledge, this is the first global optimality guarantee of gradient descent on a convolutional neural network with ReLU activations.}\n}", "pdf": "http://proceedings.mlr.press/v70/brutzkus17a/brutzkus17a.pdf", "supp": "", "pdf_size": 761926, "gs_citation": 340, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5580029596128142672&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Tel Aviv University, Blavatnik School of Computer Science; Tel Aviv University, Blavatnik School of Computer Science", "aff_domain": "mail.tau.ac.il;cs.tau.ac.il", "email": "mail.tau.ac.il;cs.tau.ac.il", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/brutzkus17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Tel Aviv University", "aff_unique_dep": "Blavatnik School of Computer Science", "aff_unique_url": "https://www.tau.ac.il", "aff_unique_abbr": "TAU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "title": "Gradient Boosted Decision Trees for High Dimensional Sparse Output", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/870", "id": "870", "author_site": "Si Si, Huan Zhang, Sathiya Keerthi, Dhruv Mahajan, Inderjit Dhillon, Cho-Jui Hsieh", "author": "Si Si; Huan Zhang; S. Sathiya Keerthi; Dhruv Mahajan; Inderjit S. Dhillon; Cho-Jui Hsieh", "abstract": "In this paper, we study the gradient boosted decision trees (GBDT) when the output space is high dimensional and sparse. For example, in multilabel classification, the output space is a $L$-dimensional 0/1 vector, where $L$ is number of labels that can grow to millions and beyond in many modern applications. We show that vanilla GBDT can easily run out of memory or encounter near-forever running time in this regime, and propose a new GBDT variant, GBDT-SPARSE, to resolve this problem by employing $L_0$ regularization. We then discuss in detail how to utilize this sparsity to conduct GBDT training, including splitting the nodes, computing the sparse residual, and predicting in sublinear time. Finally, we apply our algorithm to extreme multilabel classification problems, and show that the proposed GBDT-SPARSE achieves an order of magnitude improvements in model size and prediction time over existing methods, while yielding similar performance.", "bibtex": "@InProceedings{pmlr-v70-si17a,\n title = \t {Gradient Boosted Decision Trees for High Dimensional Sparse Output},\n author = {Si Si and Huan Zhang and S. Sathiya Keerthi and Dhruv Mahajan and Inderjit S. Dhillon and Cho-Jui Hsieh},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3182--3190},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/si17a/si17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/si17a.html},\n abstract = \t {In this paper, we study the gradient boosted decision trees (GBDT) when the output space is high dimensional and sparse. 
For example, in multilabel classification, the output space is a $L$-dimensional 0/1 vector, where $L$ is number of labels that can grow to millions and beyond in many modern applications. We show that vanilla GBDT can easily run out of memory or encounter near-forever running time in this regime, and propose a new GBDT variant, GBDT-SPARSE, to resolve this problem by employing $L_0$ regularization. We then discuss in detail how to utilize this sparsity to conduct GBDT training, including splitting the nodes, computing the sparse residual, and predicting in sublinear time. Finally, we apply our algorithm to extreme multilabel classification problems, and show that the proposed GBDT-SPARSE achieves an order of magnitude improvements in model size and prediction time over existing methods, while yielding similar performance.}\n}", "pdf": "http://proceedings.mlr.press/v70/si17a/si17a.pdf", "supp": "", "pdf_size": 664102, "gs_citation": 186, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9708414612139092491&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": ";;;;;", "aff_domain": ";;;;;", "email": ";;;;;", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/si17a.html" }, { "title": "Gradient Coding: Avoiding Stragglers in Distributed Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/851", "id": "851", "author_site": "Rashish Tandon, Qi Lei, Alexandros Dimakis, Nikos Karampatziakis", "author": "Rashish Tandon; Qi Lei; Alexandros G. Dimakis; Nikos Karampatziakis", "abstract": "We propose a novel coding theoretic framework for mitigating stragglers in distributed learning. We show how carefully replicating data blocks and coding across gradients can provide tolerance to failures and stragglers for synchronous Gradient Descent. We implement our schemes in python (using MPI) to run on Amazon EC2, and show how we compare against baseline approaches in running time and generalization error.", "bibtex": "@InProceedings{pmlr-v70-tandon17a,\n title = \t {Gradient Coding: Avoiding Stragglers in Distributed Learning},\n author = {Rashish Tandon and Qi Lei and Alexandros G. Dimakis and Nikos Karampatziakis},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3368--3376},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/tandon17a/tandon17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/tandon17a.html},\n abstract = \t {We propose a novel coding theoretic framework for mitigating stragglers in distributed learning. We show how carefully replicating data blocks and coding across gradients can provide tolerance to failures and stragglers for synchronous Gradient Descent. 
We implement our schemes in python (using MPI) to run on Amazon EC2, and show how we compare against baseline approaches in running time and generalization error.}\n}", "pdf": "http://proceedings.mlr.press/v70/tandon17a/tandon17a.pdf", "supp": "", "pdf_size": 3262296, "gs_citation": 590, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13346158632495346071&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, University of Texas at Austin; Institute for Computational Engineering and Sciences, University of Texas at Austin; Department of Electrical and Computer Engineering, University of Texas at Austin; Microsoft, Seattle, WA, USA", "aff_domain": "cs.utexas.edu; ; ; ", "email": "cs.utexas.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/tandon17a.html", "aff_unique_index": "0;0;0;1", "aff_unique_norm": "University of Texas at Austin;Microsoft", "aff_unique_dep": "Department of Computer Science;Microsoft Corporation", "aff_unique_url": "https://www.utexas.edu;https://www.microsoft.com", "aff_unique_abbr": "UT Austin;Microsoft", "aff_campus_unique_index": "0;0;0;1", "aff_campus_unique": "Austin;Seattle", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Gradient Projection Iterative Sketch for Large-Scale Constrained Least-Squares", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/578", "id": "578", "author_site": "Junqi Tang, Mohammad Golbabaee, Michael E Davies", "author": "Junqi Tang; Mohammad Golbabaee; Mike E. Davies", "abstract": "We propose a randomized first order optimization algorithm Gradient Projection Iterative Sketch (GPIS) and an accelerated variant for efficiently solving large scale constrained Least Squares (LS). We provide the first theoretical convergence analysis for both algorithms. An efficient implementation using a tailored line-search scheme is also proposed. We demonstrate our methods\u2019 computational efficiency compared to the classical accelerated gradient method, and the variance-reduced stochastic gradient methods through numerical experiments in various large synthetic/real data sets.", "bibtex": "@InProceedings{pmlr-v70-tang17a,\n title = \t {Gradient Projection Iterative Sketch for Large-Scale Constrained Least-Squares},\n author = {Junqi Tang and Mohammad Golbabaee and Mike E. Davies},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3377--3386},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/tang17a/tang17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/tang17a.html},\n abstract = \t {We propose a randomized first order optimization algorithm Gradient Projection Iterative Sketch (GPIS) and an accelerated variant for efficiently solving large scale constrained Least Squares (LS). We provide the first theoretical convergence analysis for both algorithms. An efficient implementation using a tailored line-search scheme is also proposed. 
We demonstrate our methods\u2019 computational efficiency compared to the classical accelerated gradient method, and the variance-reduced stochastic gradient methods through numerical experiments in various large synthetic/real data sets.}\n}", "pdf": "http://proceedings.mlr.press/v70/tang17a/tang17a.pdf", "supp": "", "pdf_size": 378523, "gs_citation": 43, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14572819103133279208&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Institute for Digital Communications, the University of Edinburgh, Edinburgh, UK; Institute for Digital Communications, the University of Edinburgh, Edinburgh, UK; Institute for Digital Communications, the University of Edinburgh, Edinburgh, UK", "aff_domain": "ed.ac.uk; ; ", "email": "ed.ac.uk; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/tang17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Edinburgh", "aff_unique_dep": "Institute for Digital Communications", "aff_unique_url": "https://www.ed.ac.uk", "aff_unique_abbr": "Edinburgh", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Edinburgh", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Gram-CTC: Automatic Unit Selection and Target Decomposition for Sequence Labelling", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/742", "id": "742", "author_site": "Hairong Liu, Zhenyao Zhu, Xiangang Li, Sanjeev Satheesh", "author": "Hairong Liu; Zhenyao Zhu; Xiangang Li; Sanjeev Satheesh", "abstract": "Most existing sequence labelling models rely on a fixed decomposition of a target sequence into a sequence of basic units. These methods suffer from two major drawbacks: $1$) the set of basic units is fixed, such as the set of words, characters or phonemes in speech recognition, and $2$) the decomposition of target sequences is fixed. These drawbacks usually result in sub-optimal performance of modeling sequences. In this paper, we extend the popular CTC loss criterion to alleviate these limitations, and propose a new loss function called", "bibtex": "@InProceedings{pmlr-v70-liu17f,\n title = \t {{G}ram-{CTC}: Automatic Unit Selection and Target Decomposition for Sequence Labelling},\n author = {Hairong Liu and Zhenyao Zhu and Xiangang Li and Sanjeev Satheesh},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2188--2197},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/liu17f/liu17f.pdf},\n url = \t {https://proceedings.mlr.press/v70/liu17f.html},\n abstract = \t {Most existing sequence labelling models rely on a fixed decomposition of a target sequence into a sequence of basic units. These methods suffer from two major drawbacks: $1$) the set of basic units is fixed, such as the set of words, characters or phonemes in speech recognition, and $2$) the decomposition of target sequences is fixed. These drawbacks usually result in sub-optimal performance of modeling sequences. 
In this paper, we extend the popular CTC loss criterion to alleviate these limitations, and propose a new loss function called Gram-CTC.}\n}", "pdf": "http://proceedings.mlr.press/v70/liu17f/liu17f.pdf", "supp": "", "pdf_size": 354114, "gs_citation": 65, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12903537299403652007&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Baidu Silicon Valley AI Lab; Baidu Silicon Valley AI Lab; Baidu Silicon Valley AI Lab; Baidu Silicon Valley AI Lab", "aff_domain": "baidu.com; ; ; ", "email": "baidu.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/liu17f.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Baidu", "aff_unique_dep": "Baidu AI Lab", "aff_unique_url": "https://baidu.com", "aff_unique_abbr": "Baidu", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Silicon Valley", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Grammar Variational Autoencoder", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/859", "id": "859", "author_site": "Matt J. Kusner, Brooks Paige, Jose Miguel Hernandez-Lobato", "author": "Matt J. Kusner; Brooks Paige; Jos\u00e9 Miguel Hern\u00e1ndez-Lobato", "abstract": "Deep generative models have been wildly successful at learning coherent latent representations for continuous data such as natural images, artwork, and audio. However, generative modeling of discrete data such as arithmetic expressions and molecular structures still poses significant challenges. Crucially, state-of-the-art methods often produce outputs that are not valid. We make the key observation that frequently, discrete data can be represented as a parse tree from a context-free grammar. We propose a variational autoencoder which directly encodes from and decodes to these parse trees, ensuring the generated outputs are always syntactically valid. Surprisingly, we show that not only does our model more often generate valid outputs, it also learns a more coherent latent space in which nearby points decode to similar discrete outputs. We demonstrate the effectiveness of our learned models by showing their improved performance in Bayesian optimization for symbolic regression and molecule generation.", "bibtex": "@InProceedings{pmlr-v70-kusner17a,\n title = \t {Grammar Variational Autoencoder},\n author = {Matt J. Kusner and Brooks Paige and Jos{\\'e} Miguel Hern{\\'a}ndez-Lobato},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1945--1954},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/kusner17a/kusner17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/kusner17a.html},\n abstract = \t {Deep generative models have been wildly successful at learning coherent latent representations for continuous data such as natural images, artwork, and audio. However, generative modeling of discrete data such as arithmetic expressions and molecular structures still poses significant challenges. Crucially, state-of-the-art methods often produce outputs that are not valid. We make the key observation that frequently, discrete data can be represented as a parse tree from a context-free grammar.
We propose a variational autoencoder which directly encodes from and decodes to these parse trees, ensuring the generated outputs are always syntactically valid. Surprisingly, we show that not only does our model more often generate valid outputs, it also learns a more coherent latent space in which nearby points decode to similar discrete outputs. We demonstrate the effectiveness of our learned models by showing their improved performance in Bayesian optimization for symbolic regression and molecule generation.}\n}", "pdf": "http://proceedings.mlr.press/v70/kusner17a/kusner17a.pdf", "supp": "", "pdf_size": 902945, "gs_citation": 1256, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4080460899049502885&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Alan Turing Institute+University of Warwick; Alan Turing Institute+University of Warwick; University of Cambridge", "aff_domain": "turing.ac.uk;turing.ac.uk;cam.ac.uk", "email": "turing.ac.uk;turing.ac.uk;cam.ac.uk", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/kusner17a.html", "aff_unique_index": "0+1;0+1;2", "aff_unique_norm": "Alan Turing Institute;University of Warwick;University of Cambridge", "aff_unique_dep": ";;", "aff_unique_url": "https://www.turing.ac.uk;https://www.warwick.ac.uk;https://www.cam.ac.uk", "aff_unique_abbr": "ATI;Warwick;Cambridge", "aff_campus_unique_index": ";;1", "aff_campus_unique": ";Cambridge", "aff_country_unique_index": "0+0;0+0;0", "aff_country_unique": "United Kingdom" }, { "title": "Graph-based Isometry Invariant Representation Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/597", "id": "597", "author_site": "Renata Khasanova, Pascal Frossard", "author": "Renata Khasanova; Pascal Frossard", "abstract": "Learning transformation invariant representations of visual data is an important problem in computer vision. Deep convolutional networks have demonstrated remarkable results for image and video classification tasks. However, they have achieved only limited success in the classification of images that undergo geometric transformations. In this work we present a novel Transformation Invariant Graph-based Network (TIGraNet), which learns graph-based features that are inherently invariant to isometric transformations such as rotation and translation of input images. In particular, images are represented as signals on graphs, which permits to replace classical convolution and pooling layers in deep networks with graph spectral convolution and dynamic graph pooling layers that together contribute to invariance to isometric transformation. Our experiments show high performance on rotated and translated images from the test set compared to classical architectures that are very sensitive to transformations in the data. 
The inherent invariance properties of our framework provide key advantages, such as increased resiliency to data variability and sustained performance with limited training sets.", "bibtex": "@InProceedings{pmlr-v70-khasanova17a,\n title = \t {Graph-based Isometry Invariant Representation Learning},\n author = {Renata Khasanova and Pascal Frossard},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1847--1856},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/khasanova17a/khasanova17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/khasanova17a.html},\n abstract = \t {Learning transformation invariant representations of visual data is an important problem in computer vision. Deep convolutional networks have demonstrated remarkable results for image and video classification tasks. However, they have achieved only limited success in the classification of images that undergo geometric transformations. In this work we present a novel Transformation Invariant Graph-based Network (TIGraNet), which learns graph-based features that are inherently invariant to isometric transformations such as rotation and translation of input images. In particular, images are represented as signals on graphs, which permits to replace classical convolution and pooling layers in deep networks with graph spectral convolution and dynamic graph pooling layers that together contribute to invariance to isometric transformation. Our experiments show high performance on rotated and translated images from the test set compared to classical architectures that are very sensitive to transformations in the data. The inherent invariance properties of our framework provide key advantages, such as increased resiliency to data variability and sustained performance with limited training sets.}\n}", "pdf": "http://proceedings.mlr.press/v70/khasanova17a/khasanova17a.pdf", "supp": "", "pdf_size": 544210, "gs_citation": 67, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1632873965221683759&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Ecole Polytechnique F\u00e9d\u00e9rale de Lausanne (EPFL), Lausanne, Switzerland; Ecole Polytechnique F\u00e9d\u00e9rale de Lausanne (EPFL), Lausanne, Switzerland", "aff_domain": "epfl.ch;epfl.ch", "email": "epfl.ch;epfl.ch", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/khasanova17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "EPFL", "aff_unique_dep": "", "aff_unique_url": "https://www.epfl.ch", "aff_unique_abbr": "EPFL", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Lausanne", "aff_country_unique_index": "0;0", "aff_country_unique": "Switzerland" }, { "title": "Guarantees for Greedy Maximization of Non-submodular Functions with Applications", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/521", "id": "521", "author_site": "Yatao Bian, Joachim Buhmann, Andreas Krause, Sebastian Tschiatschek", "author": "Andrew An Bian; Joachim M. Buhmann; Andreas Krause; Sebastian Tschiatschek", "abstract": "We investigate the performance of the standard Greedy algorithm for cardinality constrained maximization of non-submodular nondecreasing set functions. 
While there are strong theoretical guarantees on the performance of Greedy for maximizing submodular functions, there are few guarantees for non-submodular ones. However, Greedy enjoys strong empirical performance for many important non-submodular functions, e.g., the Bayesian A-optimality objective in experimental design. We prove theoretical guarantees supporting the empirical performance. Our guarantees are characterized by a combination of the (generalized) curvature $\\alpha$ and the submodularity ratio $\\gamma$. In particular, we prove that Greedy enjoys a tight approximation guarantee of $\\frac{1}{\\alpha}(1- e^{-\\gamma\\alpha})$ for cardinality constrained maximization. In addition, we bound the submodularity ratio and curvature for several important real-world objectives, including the Bayesian A-optimality objective, the determinantal function of a square submatrix and certain linear programs with combinatorial constraints. We experimentally validate our theoretical findings for both synthetic and real-world applications.", "bibtex": "@InProceedings{pmlr-v70-bian17a,\n title = \t {Guarantees for Greedy Maximization of Non-submodular Functions with Applications},\n author = {Andrew An Bian and Joachim M. Buhmann and Andreas Krause and Sebastian Tschiatschek},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {498--507},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/bian17a/bian17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/bian17a.html},\n abstract = \t {We investigate the performance of the standard Greedy algorithm for cardinality constrained maximization of non-submodular nondecreasing set functions. While there are strong theoretical guarantees on the performance of Greedy for maximizing submodular functions, there are few guarantees for non-submodular ones. However, Greedy enjoys strong empirical performance for many important non-submodular functions, e.g., the Bayesian A-optimality objective in experimental design. We prove theoretical guarantees supporting the empirical performance. Our guarantees are characterized by a combination of the (generalized) curvature $\\alpha$ and the submodularity ratio $\\gamma$. In particular, we prove that Greedy enjoys a tight approximation guarantee of $\\frac{1}{\\alpha}(1- e^{-\\gamma\\alpha})$ for cardinality constrained maximization. In addition, we bound the submodularity ratio and curvature for several important real-world objectives, including the Bayesian A-optimality objective, the determinantal function of a square submatrix and certain linear programs with combinatorial constraints. 
We experimentally validate our theoretical findings for both synthetic and real-world applications.}\n}", "pdf": "http://proceedings.mlr.press/v70/bian17a/bian17a.pdf", "supp": "", "pdf_size": 1076032, "gs_citation": 311, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13860517534640072019&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Department of Computer Science, ETH Zurich, Zurich, Switzerland; Department of Computer Science, ETH Zurich, Zurich, Switzerland; Department of Computer Science, ETH Zurich, Zurich, Switzerland; Department of Computer Science, ETH Zurich, Zurich, Switzerland", "aff_domain": "inf.ethz.ch;ethz.ch; ; ", "email": "inf.ethz.ch;ethz.ch; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/bian17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETHZ", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Zurich", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Switzerland" }, { "title": "Hierarchy Through Composition with Multitask LMDPs", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/725", "id": "725", "author_site": "Andrew Saxe, Adam Earle, Benjamin Rosman", "author": "Andrew M. Saxe; Adam C. Earle; Benjamin Rosman", "abstract": "Hierarchical architectures are critical to the scalability of reinforcement learning methods. Most current hierarchical frameworks execute actions serially, with macro-actions comprising sequences of primitive actions. We propose a novel alternative to these control hierarchies based on concurrent execution of many actions in parallel. Our scheme exploits the guaranteed concurrent compositionality provided by the linearly solvable Markov decision process (LMDP) framework, which naturally enables a learning agent to draw on several macro-actions simultaneously to solve new tasks. We introduce the Multitask LMDP module, which maintains a parallel distributed representation of tasks and may be stacked to form deep hierarchies abstracted in space and time.", "bibtex": "@InProceedings{pmlr-v70-saxe17a,\n title = \t {Hierarchy Through Composition with Multitask {LMDP}s},\n author = {Andrew M. Saxe and Adam C. Earle and Benjamin Rosman},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3017--3026},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/saxe17a/saxe17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/saxe17a.html},\n abstract = \t {Hierarchical architectures are critical to the scalability of reinforcement learning methods. Most current hierarchical frameworks execute actions serially, with macro-actions comprising sequences of primitive actions. We propose a novel alternative to these control hierarchies based on concurrent execution of many actions in parallel. Our scheme exploits the guaranteed concurrent compositionality provided by the linearly solvable Markov decision process (LMDP) framework, which naturally enables a learning agent to draw on several macro-actions simultaneously to solve new tasks. 
We introduce the Multitask LMDP module, which maintains a parallel distributed representation of tasks and may be stacked to form deep hierarchies abstracted in space and time.}\n}", "pdf": "http://proceedings.mlr.press/v70/saxe17a/saxe17a.pdf", "supp": "", "pdf_size": 2177978, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11768598959520152927&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Center for Brain Science, Harvard University; School of Computer Science and Applied Mathematics, University of the Witwatersrand; School of Computer Science and Applied Mathematics, University of the Witwatersrand + Council for Scientific and Industrial Research, South Africa", "aff_domain": "fas.harvard.edu; ; ", "email": "fas.harvard.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/saxe17a.html", "aff_unique_index": "0;1;1+2", "aff_unique_norm": "Harvard University;University of the Witwatersrand;Council for Scientific and Industrial Research", "aff_unique_dep": "Center for Brain Science;School of Computer Science and Applied Mathematics;", "aff_unique_url": "https://www.harvard.edu;https://www.wits.ac.za;https://www.csir.co.za", "aff_unique_abbr": "Harvard;Wits;CSIR", "aff_campus_unique_index": "0;", "aff_campus_unique": "Cambridge;", "aff_country_unique_index": "0;1;1+1", "aff_country_unique": "United States;South Africa" }, { "title": "High Dimensional Bayesian Optimization with Elastic Gaussian Process", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/554", "id": "554", "author_site": "Santu Rana, Cheng Li, Sunil Gupta, Vu Nguyen, Svetha Venkatesh", "author": "Santu Rana; Cheng Li; Sunil Gupta; Vu Nguyen; Svetha Venkatesh", "abstract": "Bayesian optimization is an efficient way to optimize expensive black-box functions such as designing a new product with highest quality or hyperparameter tuning of a machine learning algorithm. However, it has a serious limitation when the parameter space is high-dimensional as Bayesian optimization crucially depends on solving a global optimization of a surrogate utility function in the same sized dimensions. The surrogate utility function, known commonly as acquisition function is a continuous function but can be extremely sharp at high dimension - having only a few peaks marooned in a large terrain of almost flat surface. Global optimization algorithms such as DIRECT are infeasible at higher dimensions and gradient-dependent methods cannot move if initialized in the flat terrain. We propose an algorithm that enables local gradient-dependent algorithms to move through the flat terrain by using a sequence of gross-to-finer Gaussian process priors on the objective function as we leverage two underlying facts - a) there exists a large enough length-scales for which the acquisition function can be made to have a significant gradient at any location in the parameter space, and b) the extrema of the consecutive acquisition functions are close although they are different only due to a small difference in the length-scales. 
Theoretical guarantees are provided and experiments clearly demonstrate the utility of the proposed method at high dimension using both benchmark test functions and real-world case studies.", "bibtex": "@InProceedings{pmlr-v70-rana17a,\n title = \t {High Dimensional {B}ayesian Optimization with Elastic {G}aussian Process},\n author = {Santu Rana and Cheng Li and Sunil Gupta and Vu Nguyen and Svetha Venkatesh},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2883--2891},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/rana17a/rana17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/rana17a.html},\n abstract = \t {Bayesian optimization is an efficient way to optimize expensive black-box functions such as designing a new product with highest quality or hyperparameter tuning of a machine learning algorithm. However, it has a serious limitation when the parameter space is high-dimensional as Bayesian optimization crucially depends on solving a global optimization of a surrogate utility function in the same sized dimensions. The surrogate utility function, known commonly as acquisition function is a continuous function but can be extremely sharp at high dimension - having only a few peaks marooned in a large terrain of almost flat surface. Global optimization algorithms such as DIRECT are infeasible at higher dimensions and gradient-dependent methods cannot move if initialized in the flat terrain. We propose an algorithm that enables local gradient-dependent algorithms to move through the flat terrain by using a sequence of gross-to-finer Gaussian process priors on the objective function as we leverage two underlying facts - a) there exists a large enough length-scales for which the acquisition function can be made to have a significant gradient at any location in the parameter space, and b) the extrema of the consecutive acquisition functions are close although they are different only due to a small difference in the length-scales. 
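Aside, keyed to the "High Dimensional Bayesian Optimization with Elastic Gaussian Process" entry above: a small numerical sketch of the length-scale effect that claim (a) relies on, using a plain squared-exponential kernel as a stand-in for the acquisition surface (an assumption; this is not the authors' acquisition function or code), with a hypothetical distance d from the nearest observation.

```python
# Minimal numerical sketch (not the authors' code) of the length-scale effect the
# abstract leans on: with a squared-exponential kernel k(d) = exp(-d^2 / (2 l^2)),
# the gradient magnitude |dk/dd| = (d / l^2) * exp(-d^2 / (2 l^2)) is essentially
# zero far from observed data when l is small, but becomes usable once l grows.
import math

def kernel_grad_magnitude(d, length_scale):
    l2 = length_scale ** 2
    return (d / l2) * math.exp(-d * d / (2.0 * l2))

d = 10.0  # hypothetical distance from the nearest observation
for l in (0.5, 1.0, 5.0, 10.0, 20.0):
    print(f"length-scale {l:5.1f}: |dk/dd| at d={d} is {kernel_grad_magnitude(d, l):.3e}")
```

Under a small length-scale the gradient seen far from the data is numerically negligible, while a gross (large) length-scale restores a usable gradient, which is the intuition behind walking from gross to finer GP priors.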
Theoretical guarantees are provided and experiments clearly demonstrate the utility of the proposed method at high dimension using both benchmark test functions and real-world case studies.}\n}", "pdf": "http://proceedings.mlr.press/v70/rana17a/rana17a.pdf", "supp": "", "pdf_size": 720903, "gs_citation": 142, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15462991500785501054&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Centre for Pattern Recognition and Data Analytics (PRaDA), Deakin University, Australia; Centre for Pattern Recognition and Data Analytics (PRaDA), Deakin University, Australia; Centre for Pattern Recognition and Data Analytics (PRaDA), Deakin University, Australia; Centre for Pattern Recognition and Data Analytics (PRaDA), Deakin University, Australia; Centre for Pattern Recognition and Data Analytics (PRaDA), Deakin University, Australia", "aff_domain": "deakin.edu.au; ; ; ; ", "email": "deakin.edu.au; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/rana17a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Deakin University", "aff_unique_dep": "Centre for Pattern Recognition and Data Analytics (PRaDA)", "aff_unique_url": "https://www.deakin.edu.au", "aff_unique_abbr": "Deakin", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "Australia" }, { "title": "High-Dimensional Structured Quantile Regression", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/741", "id": "741", "author_site": "Vidyashankar Sivakumar, Arindam Banerjee", "author": "Vidyashankar Sivakumar; Arindam Banerjee", "abstract": "Quantile regression aims at modeling the conditional median and quantiles of a response variable given certain predictor variables. In this work we consider the problem of linear quantile regression in high dimensions where the number of predictor variables is much higher than the number of samples available for parameter estimation. We assume the true parameter to have some structure characterized as having a small value according to some atomic norm R(.) and consider the norm regularized quantile regression estimator. We characterize the sample complexity for consistent recovery and give non-asymptotic bounds on the estimation error. While this problem has been previously considered, our analysis reveals geometric and statistical characteristics of the problem not available in prior literature. We perform experiments on synthetic data which support the theoretical results.", "bibtex": "@InProceedings{pmlr-v70-sivakumar17a,\n title = \t {High-Dimensional Structured Quantile Regression},\n author = {Vidyashankar Sivakumar and Arindam Banerjee},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3220--3229},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/sivakumar17a/sivakumar17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/sivakumar17a.html},\n abstract = \t {Quantile regression aims at modeling the conditional median and quantiles of a response variable given certain predictor variables. 
In this work we consider the problem of linear quantile regression in high dimensions where the number of predictor variables is much higher than the number of samples available for parameter estimation. We assume the true parameter to have some structure characterized as having a small value according to some atomic norm R(.) and consider the norm regularized quantile regression estimator. We characterize the sample complexity for consistent recovery and give non-asymptotic bounds on the estimation error. While this problem has been previously considered, our analysis reveals geometric and statistical characteristics of the problem not available in prior literature. We perform experiments on synthetic data which support the theoretical results.}\n}", "pdf": "http://proceedings.mlr.press/v70/sivakumar17a/sivakumar17a.pdf", "supp": "", "pdf_size": 785872, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5523900719815890474&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science & Engineering, University of Minnesota, Twin Cities; Department of Computer Science & Engineering, University of Minnesota, Twin Cities", "aff_domain": "umn.edu;cs.umn.edu", "email": "umn.edu;cs.umn.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/sivakumar17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Minnesota", "aff_unique_dep": "Department of Computer Science & Engineering", "aff_unique_url": "https://www.minnesota.edu", "aff_unique_abbr": "UMN", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Twin Cities", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "High-Dimensional Variance-Reduced Stochastic Gradient Expectation-Maximization Algorithm", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/547", "id": "547", "author_site": "Rongda Zhu, Lingxiao Wang, Chengxiang Zhai, Quanquan Gu", "author": "Rongda Zhu; Lingxiao Wang; Chengxiang Zhai; Quanquan Gu", "abstract": "We propose a generic stochastic expectation-maximization (EM) algorithm for the estimation of high-dimensional latent variable models. At the core of our algorithm is a novel semi-stochastic variance-reduced gradient designed for the $Q$-function in the EM algorithm. Under a mild condition on the initialization, our algorithm is guaranteed to attain a linear convergence rate to the unknown parameter of the latent variable model, and achieve an optimal statistical rate up to a logarithmic factor for parameter estimation. Compared with existing high-dimensional EM algorithms, our algorithm enjoys a better computational complexity and is therefore more efficient. We apply our generic algorithm to two illustrative latent variable models: Gaussian mixture model and mixture of linear regression, and demonstrate the advantages of our algorithm by both theoretical analysis and numerical experiments. 
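Aside, keyed to the "High-Dimensional Structured Quantile Regression" entry above: a minimal sketch of a norm-regularized quantile regression estimator with R(.) specialized to the l1 norm (an assumption; the entry treats general atomic norms), fit by plain subgradient descent on the pinball loss over synthetic data; this is illustrative only and not the authors' estimator or analysis.

```python
# Minimal sketch keyed to the structured quantile regression entry above (not the
# authors' code): the norm-regularized estimator with R(.) specialized to the L1
# norm, fit by plain subgradient descent on the pinball (check) loss.
import numpy as np

def pinball_subgrad(w, X, y, tau, lam):
    """Subgradient of (1/n) * sum_i rho_tau(y_i - x_i'w) + lam * ||w||_1."""
    r = y - X @ w
    # d/dw of rho_tau(y_i - x_i'w) is -tau * x_i for r_i > 0 and (1 - tau) * x_i for r_i < 0.
    g_loss = X.T @ np.where(r > 0, -tau, 1.0 - tau) / len(y)
    return g_loss + lam * np.sign(w)

rng = np.random.default_rng(0)
n, p = 200, 50
w_true = np.zeros(p); w_true[:3] = [2.0, -1.5, 1.0]           # sparse truth
X = rng.standard_normal((n, p))
y = X @ w_true + rng.standard_t(df=3, size=n)                  # heavy-tailed noise

w, tau, lam, step = np.zeros(p), 0.5, 0.05, 0.05
for t in range(2000):
    w -= step / np.sqrt(t + 1) * pinball_subgrad(w, X, y, tau, lam)
print("largest recovered coefficients:", np.round(np.sort(np.abs(w))[-5:], 2))
```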
We believe that the proposed semi-stochastic gradient is of independent interest for general nonconvex optimization problems with bivariate structures.", "bibtex": "@InProceedings{pmlr-v70-zhu17a,\n title = \t {High-Dimensional Variance-Reduced Stochastic Gradient Expectation-Maximization Algorithm},\n author = {Rongda Zhu and Lingxiao Wang and Chengxiang Zhai and Quanquan Gu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {4180--4188},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zhu17a/zhu17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/zhu17a.html},\n abstract = \t {We propose a generic stochastic expectation-maximization (EM) algorithm for the estimation of high-dimensional latent variable models. At the core of our algorithm is a novel semi-stochastic variance-reduced gradient designed for the $Q$-function in the EM algorithm. Under a mild condition on the initialization, our algorithm is guaranteed to attain a linear convergence rate to the unknown parameter of the latent variable model, and achieve an optimal statistical rate up to a logarithmic factor for parameter estimation. Compared with existing high-dimensional EM algorithms, our algorithm enjoys a better computational complexity and is therefore more efficient. We apply our generic algorithm to two illustrative latent variable models: Gaussian mixture model and mixture of linear regression, and demonstrate the advantages of our algorithm by both theoretical analysis and numerical experiments. We believe that the proposed semi-stochastic gradient is of independent interest for general nonconvex optimization problems with bivariate structures.}\n}", "pdf": "http://proceedings.mlr.press/v70/zhu17a/zhu17a.pdf", "supp": "", "pdf_size": 471160, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18444494515140043374&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/zhu17a.html" }, { "title": "High-dimensional Non-Gaussian Single Index Models via Thresholded Score Function Estimation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/737", "id": "737", "author_site": "Zhuoran Yang, Krishnakumar Balasubramanian, Han Liu", "author": "Zhuoran Yang; Krishnakumar Balasubramanian; Han Liu", "abstract": "We consider estimating the parametric component of single index models in high dimensions. Compared with existing work, we do not require the covariate to be normally distributed. Utilizing Stein\u2019s Lemma, we propose estimators based on the score function of the covariate. Moreover, to handle score function and response variables that are heavy-tailed, our estimators are constructed via carefully thresholding their empirical counterparts. Under a bounded fourth moment condition, we establish optimal statistical rates of convergence for the proposed estimators. 
Extensive numerical experiments are provided to back up our theory.", "bibtex": "@InProceedings{pmlr-v70-yang17a,\n title = \t {High-dimensional Non-{G}aussian Single Index Models via Thresholded Score Function Estimation},\n author = {Zhuoran Yang and Krishnakumar Balasubramanian and Han Liu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3851--3860},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/yang17a/yang17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/yang17a.html},\n abstract = \t {We consider estimating the parametric component of single index models in high dimensions. Compared with existing work, we do not require the covariate to be normally distributed. Utilizing Stein\u2019s Lemma, we propose estimators based on the score function of the covariate. Moreover, to handle score function and response variables that are heavy-tailed, our estimators are constructed via carefully thresholding their empirical counterparts. Under a bounded fourth moment condition, we establish optimal statistical rates of convergence for the proposed estimators. Extensive numerical experiments are provided to back up our theory.}\n}", "pdf": "http://proceedings.mlr.press/v70/yang17a/yang17a.pdf", "supp": "", "pdf_size": 432564, "gs_citation": 61, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13541277531320044354&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Operations Research and Financial Engineering, Princeton University; Department of Operations Research and Financial Engineering, Princeton University; Department of Operations Research and Financial Engineering, Princeton University", "aff_domain": "princeton.edu;princeton.edu;princeton.edu", "email": "princeton.edu;princeton.edu;princeton.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/yang17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Princeton University", "aff_unique_dep": "Department of Operations Research and Financial Engineering", "aff_unique_url": "https://www.princeton.edu", "aff_unique_abbr": "Princeton", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "How Close Are the Eigenvectors of the Sample and Actual Covariance Matrices?", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/489", "id": "489", "author": "Andreas Loukas", "abstract": "How many samples are sufficient to guarantee that the eigenvectors of the sample covariance matrix are close to those of the actual covariance matrix? For a wide family of distributions, including distributions with finite second moment and sub-gaussian distributions supported in a centered Euclidean ball, we prove that the inner product between eigenvectors of the sample and actual covariance matrices decreases proportionally to the respective eigenvalue distance and the number of samples. 
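Aside, keyed to the "How Close Are the Eigenvectors of the Sample and Actual Covariance Matrices?" entry above: a tiny numpy experiment that just measures the inner products the abstract talks about, for a hypothetical covariance with two close leading eigenvalues; the model, sizes and seed are assumptions, and this is not the author's code or bound.

```python
# Tiny numpy experiment in the spirit of the entry above (not the author's code):
# compare eigenvectors of a known covariance with those of the sample covariance,
# for a hypothetical spiked covariance and a few sample sizes.
import numpy as np

rng = np.random.default_rng(1)
p = 30
eigvals = np.array([10.0, 9.5] + [1.0] * (p - 2))       # two close leading eigenvalues
Q, _ = np.linalg.qr(rng.standard_normal((p, p)))         # random orthonormal eigenbasis
Sigma = Q @ np.diag(eigvals) @ Q.T

for n in (100, 1000, 10000):
    X = rng.multivariate_normal(np.zeros(p), Sigma, size=n)
    S = np.cov(X, rowvar=False)
    w, V = np.linalg.eigh(S)                              # eigenvalues in ascending order
    V = V[:, ::-1]                                        # descending, to match eigvals
    overlaps = [abs(Q[:, i] @ V[:, i]) for i in range(2)] # the two leading directions
    print(f"n={n:6d}  |<u_i, u_hat_i>| for i=1,2:", np.round(overlaps, 3))
```

The overlap for the two leading directions improves with n but is held back by the small gap between their eigenvalues, which is the qualitative dependence on eigenvalue distance and sample size described in the abstract.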
Our findings imply", "bibtex": "@InProceedings{pmlr-v70-loukas17a,\n title = \t {How Close Are the Eigenvectors of the Sample and Actual Covariance Matrices?},\n author = {Andreas Loukas},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2228--2237},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/loukas17a/loukas17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/loukas17a.html},\n abstract = \t {How many samples are sufficient to guarantee that the eigenvectors of the sample covariance matrix are close to those of the actual covariance matrix? For a wide family of distributions, including distributions with finite second moment and sub-gaussian distributions supported in a centered Euclidean ball, we prove that the inner product between eigenvectors of the sample and actual covariance matrices decreases proportionally to the respective eigenvalue distance and the number of samples. Our findings imply", "pdf": "http://proceedings.mlr.press/v70/loukas17a/loukas17a.pdf", "supp": "", "pdf_size": 452571, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13633242869174287295&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "\u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne, Switzerland", "aff_domain": "epfl.ch", "email": "epfl.ch", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v70/loukas17a.html", "aff_unique_index": "0", "aff_unique_norm": "EPFL", "aff_unique_dep": "", "aff_unique_url": "https://www.epfl.ch", "aff_unique_abbr": "EPFL", "aff_country_unique_index": "0", "aff_country_unique": "Switzerland" }, { "title": "How to Escape Saddle Points Efficiently", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/640", "id": "640", "author_site": "Chi Jin, Rong Ge, Praneeth Netrapalli, Sham Kakade, Michael Jordan", "author": "Chi Jin; Rong Ge; Praneeth Netrapalli; Sham M. Kakade; Michael I. Jordan", "abstract": "This paper shows that a perturbed form of gradient descent converges to a second-order stationary point in a number iterations which depends only poly-logarithmically on dimension (i.e., it is almost \u201cdimension-free\u201d). The convergence rate of this procedure matches the well-known convergence rate of gradient descent to first-order stationary points, up to log factors. When all saddle points are non-degenerate, all second-order stationary points are local minima, and our result thus shows that perturbed gradient descent can escape saddle points almost for free. Our results can be directly applied to many machine learning applications, including deep learning. As a particular concrete example of such an application, we show that our results can be used directly to establish sharp global convergence rates for matrix factorization. Our results rely on a novel characterization of the geometry around saddle points, which may be of independent interest to the non-convex optimization community.", "bibtex": "@InProceedings{pmlr-v70-jin17a,\n title = \t {How to Escape Saddle Points Efficiently},\n author = {Chi Jin and Rong Ge and Praneeth Netrapalli and Sham M. Kakade and Michael I.
Jordan},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1724--1732},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/jin17a/jin17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/jin17a.html},\n abstract = \t {This paper shows that a perturbed form of gradient descent converges to a second-order stationary point in a number iterations which depends only poly-logarithmically on dimension (i.e., it is almost \u201cdimension-free\u201d). The convergence rate of this procedure matches the well-known convergence rate of gradient descent to first-order stationary points, up to log factors. When all saddle points are non-degenerate, all second-order stationary points are local minima, and our result thus shows that perturbed gradient descent can escape saddle points almost for free. Our results can be directly applied to many machine learning applications, including deep learning. As a particular concrete example of such an application, we show that our results can be used directly to establish sharp global convergence rates for matrix factorization. Our results rely on a novel characterization of the geometry around saddle points, which may be of independent interest to the non-convex optimization community.}\n}", "pdf": "http://proceedings.mlr.press/v70/jin17a/jin17a.pdf", "supp": "", "pdf_size": 629949, "gs_citation": 1074, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4868890107031817813&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": ";;;;", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/jin17a.html" }, { "title": "Hyperplane Clustering via Dual Principal Component Pursuit", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/736", "id": "736", "author_site": "Manolis Tsakiris, Rene Vidal", "author": "Manolis C. Tsakiris; Ren\u00e9 Vidal", "abstract": "State-of-the-art methods for clustering data drawn from a union of subspaces are based on sparse and low-rank representation theory and convex optimization algorithms. Existing results guaranteeing the correctness of such methods require the dimension of the subspaces to be small relative to the dimension of the ambient space. When this assumption is violated, as is, e.g., in the case of hyperplanes, existing methods are either computationally too intensive (e.g., algebraic methods) or lack sufficient theoretical support (e.g., K-Hyperplanes or RANSAC). In this paper we provide theoretical and algorithmic contributions to the problem of clustering data from a union of hyperplanes, by extending a recent subspace learning method called Dual Principal Component Pursuit (DPCP) to the multi-hyperplane case. We give theoretical guarantees under which, the non-convex $\\ell_1$ problem associated with DPCP admits a unique global minimizer equal to the normal vector of the most dominant hyperplane. 
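Aside, keyed to the "How to Escape Saddle Points Efficiently" entry above: a heavily simplified sketch of the perturbation idea only (add a small random perturbation when the gradient is small), not the paper's exact algorithm, constants or schedule; the test function and every hyperparameter are hypothetical.

```python
# Heavily simplified sketch of the idea behind the saddle-point entry above (not the
# paper's exact algorithm or constants): run gradient descent, and when the gradient
# is small, add a small random perturbation so iterates can leave strict saddles.
import numpy as np

def perturbed_gd(grad, x0, eta=0.1, g_thresh=1e-3, radius=1e-2, steps=2000, cooldown=50, seed=0):
    rng = np.random.default_rng(seed)
    x, last_perturb = np.array(x0, dtype=float), -cooldown
    for t in range(steps):
        g = grad(x)
        if np.linalg.norm(g) <= g_thresh and t - last_perturb >= cooldown:
            x = x + rng.uniform(-radius, radius, size=x.shape)  # crude stand-in for a ball perturbation
            last_perturb = t
        x = x - eta * grad(x)
    return x

# Hypothetical test function with a strict saddle at the origin: f(x, y) = x^2 - y^2 + y^4.
grad = lambda v: np.array([2 * v[0], -2 * v[1] + 4 * v[1] ** 3])
print(perturbed_gd(grad, x0=[1.0, 0.0]))  # leaves the saddle and approaches (0, +-1/sqrt(2))
```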
Inspired by this insight, we propose sequential (RANSAC-style) and iterative (K-Hyperplanes-style) hyperplane learning DPCP algorithms, which, via experiments on synthetic and real data, are shown to outperform or be competitive to the state-of-the-art.", "bibtex": "@InProceedings{pmlr-v70-tsakiris17a,\n title = \t {Hyperplane Clustering via Dual Principal Component Pursuit},\n author = {Manolis C. Tsakiris and Ren{\\'e} Vidal},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3472--3481},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/tsakiris17a/tsakiris17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/tsakiris17a.html},\n abstract = \t {State-of-the-art methods for clustering data drawn from a union of subspaces are based on sparse and low-rank representation theory and convex optimization algorithms. Existing results guaranteeing the correctness of such methods require the dimension of the subspaces to be small relative to the dimension of the ambient space. When this assumption is violated, as is, e.g., in the case of hyperplanes, existing methods are either computationally too intensive (e.g., algebraic methods) or lack sufficient theoretical support (e.g., K-Hyperplanes or RANSAC). In this paper we provide theoretical and algorithmic contributions to the problem of clustering data from a union of hyperplanes, by extending a recent subspace learning method called Dual Principal Component Pursuit (DPCP) to the multi-hyperplane case. We give theoretical guarantees under which, the non-convex $\\ell_1$ problem associated with DPCP admits a unique global minimizer equal to the normal vector of the most dominant hyperplane. 
Inspired by this insight, we propose sequential (RANSAC-style) and iterative (K-Hyperplanes-style) hyperplane learning DPCP algorithms, which, via experiments on synthetic and real data, are shown to outperform or be competitive to the state-of-the-art.}\n}", "pdf": "http://proceedings.mlr.press/v70/tsakiris17a/tsakiris17a.pdf", "supp": "", "pdf_size": 418278, "gs_citation": 35, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14191021543377155536&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Center for Imaging Science, Johns Hopkins University, Baltimore, MD, USA; Center for Imaging Science, Johns Hopkins University, Baltimore, MD, USA", "aff_domain": "jhu.edu; ", "email": "jhu.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/tsakiris17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Johns Hopkins University", "aff_unique_dep": "Center for Imaging Science", "aff_unique_url": "https://www.jhu.edu", "aff_unique_abbr": "JHU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Baltimore", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Identification and Model Testing in Linear Structural Equation Models using Auxiliary Variables", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/783", "id": "783", "author_site": "Bryant Chen, Daniel Kumor, Elias Bareinboim", "author": "Bryant Chen; Daniel Kumor; Elias Bareinboim", "abstract": "We developed a novel approach to identification and model testing in linear structural equation models (SEMs) based on auxiliary variables (AVs), which generalizes a widely-used family of methods known as instrumental variables. The identification problem is concerned with the conditions under which causal parameters can be uniquely estimated from an observational, non-causal covariance matrix. In this paper, we provide an algorithm for the identification of causal parameters in linear structural models that subsumes previous state-of-the-art methods. In other words, our algorithm identifies strictly more coefficients and models than methods previously known in the literature. Our algorithm builds on a graph-theoretic characterization of conditional independence relations between auxiliary and model variables, which is developed in this paper. Further, we leverage this new characterization for allowing identification when limited experimental data or new substantive knowledge about the domain is available. Lastly, we develop a new procedure for model testing using AVs.", "bibtex": "@InProceedings{pmlr-v70-chen17f,\n title = \t {Identification and Model Testing in Linear Structural Equation Models using Auxiliary Variables},\n author = {Bryant Chen and Daniel Kumor and Elias Bareinboim},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {757--766},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/chen17f/chen17f.pdf},\n url = \t {https://proceedings.mlr.press/v70/chen17f.html},\n abstract = \t {We developed a novel approach to identification and model testing in linear structural equation models (SEMs) based on auxiliary variables (AVs), which generalizes a widely-used family of methods known as instrumental variables. 
The identification problem is concerned with the conditions under which causal parameters can be uniquely estimated from an observational, non-causal covariance matrix. In this paper, we provide an algorithm for the identification of causal parameters in linear structural models that subsumes previous state-of-the-art methods. In other words, our algorithm identifies strictly more coefficients and models than methods previously known in the literature. Our algorithm builds on a graph-theoretic characterization of conditional independence relations between auxiliary and model variables, which is developed in this paper. Further, we leverage this new characterization for allowing identification when limited experimental data or new substantive knowledge about the domain is available. Lastly, we develop a new procedure for model testing using AVs.}\n}", "pdf": "http://proceedings.mlr.press/v70/chen17f/chen17f.pdf", "supp": "", "pdf_size": 454796, "gs_citation": 34, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11482533329616288206&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "IBM Research, San Jose, California, USA+Purdue University, West Lafayette, Indiana, USA; Purdue University, West Lafayette, Indiana, USA; Purdue University, West Lafayette, Indiana, USA", "aff_domain": "ibm.com;purdue.edu; ", "email": "ibm.com;purdue.edu; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/chen17f.html", "aff_unique_index": "0+1;1;1", "aff_unique_norm": "IBM;Purdue University", "aff_unique_dep": "IBM Research;", "aff_unique_url": "https://www.ibm.com/research;https://www.purdue.edu", "aff_unique_abbr": "IBM;Purdue", "aff_campus_unique_index": "0+1;1;1", "aff_campus_unique": "San Jose;West Lafayette", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "United States" }, { "title": "Identify the Nash Equilibrium in Static Games with Random Payoffs", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/681", "id": "681", "author_site": "Yichi Zhou, Jialian Li, Jun Zhu", "author": "Yichi Zhou; Jialian Li; Jun Zhu", "abstract": "We study the problem on how to learn the pure Nash Equilibrium of a two-player zero-sum static game with random payoffs under unknown distributions via efficient payoff queries. We introduce a multi-armed bandit model to this problem due to its ability to find the best arm efficiently among random arms and propose two algorithms for this problem\u2014LUCB-G based on the confidence bounds and a racing algorithm based on successive action elimination. We provide an analysis on the sample complexity lower bound when the Nash Equilibrium exists.", "bibtex": "@InProceedings{pmlr-v70-zhou17b,\n title = \t {Identify the {N}ash Equilibrium in Static Games with Random Payoffs},\n author = {Yichi Zhou and Jialian Li and Jun Zhu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {4160--4169},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zhou17b/zhou17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/zhou17b.html},\n abstract = \t {We study the problem on how to learn the pure Nash Equilibrium of a two-player zero-sum static game with random payoffs under unknown distributions via efficient payoff queries. 
We introduce a multi-armed bandit model to this problem due to its ability to find the best arm efficiently among random arms and propose two algorithms for this problem\u2014LUCB-G based on the confidence bounds and a racing algorithm based on successive action elimination. We provide an analysis on the sample complexity lower bound when the Nash Equilibrium exists.}\n}", "pdf": "http://proceedings.mlr.press/v70/zhou17b/zhou17b.pdf", "supp": "", "pdf_size": 351894, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5249236187138579828&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Dept. of Comp. Sci. & Tech., TNList Lab, State Key Lab for Intell. Tech. & Systems, CBICR Center, Tsinghua University; Dept. of Comp. Sci. & Tech., TNList Lab, State Key Lab for Intell. Tech. & Systems, CBICR Center, Tsinghua University; Dept. of Comp. Sci. & Tech., TNList Lab, State Key Lab for Intell. Tech. & Systems, CBICR Center, Tsinghua University", "aff_domain": "tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn", "email": "tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/zhou17b.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Tsinghua University", "aff_unique_dep": "Dept. of Comp. Sci. & Tech.", "aff_unique_url": "https://www.tsinghua.edu.cn", "aff_unique_abbr": "THU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "China" }, { "title": "Identifying Best Interventions through Online Importance Sampling", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/619", "id": "619", "author_site": "Rajat Sen, Karthikeyan Shanmugam, Alexandros Dimakis, Sanjay Shakkottai", "author": "Rajat Sen; Karthikeyan Shanmugam; Alexandros G. Dimakis; Sanjay Shakkottai", "abstract": "Motivated by applications in computational advertising and systems biology, we consider the problem of identifying the best out of several possible soft interventions at a source node $V$ in an acyclic causal directed graph, to maximize the expected value of a target node $Y$ (located downstream of $V$). Our setting imposes a fixed total budget for sampling under various interventions, along with cost constraints on different types of interventions. We pose this as a best arm identification bandit problem with $K$ arms, where each arm is a soft intervention at $V$ and leverage the information leakage among the arms to provide the first gap dependent error and simple regret bounds for this problem. Our results are a significant improvement over the traditional best arm identification results. We empirically show that our algorithms outperform the state of the art in the Flow Cytometry data-set, and also apply our algorithm for model interpretation of the Inception-v3 deep net that classifies images.", "bibtex": "@InProceedings{pmlr-v70-sen17a,\n title = \t {Identifying Best Interventions through Online Importance Sampling},\n author = {Rajat Sen and Karthikeyan Shanmugam and Alexandros G. 
Dimakis and Sanjay Shakkottai},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3057--3066},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/sen17a/sen17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/sen17a.html},\n abstract = \t {Motivated by applications in computational advertising and systems biology, we consider the problem of identifying the best out of several possible soft interventions at a source node $V$ in an acyclic causal directed graph, to maximize the expected value of a target node $Y$ (located downstream of $V$). Our setting imposes a fixed total budget for sampling under various interventions, along with cost constraints on different types of interventions. We pose this as a best arm identification bandit problem with $K$ arms, where each arm is a soft intervention at $V$ and leverage the information leakage among the arms to provide the first gap dependent error and simple regret bounds for this problem. Our results are a significant improvement over the traditional best arm identification results. We empirically show that our algorithms outperform the state of the art in the Flow Cytometry data-set, and also apply our algorithm for model interpretation of the Inception-v3 deep net that classifies images.}\n}", "pdf": "http://proceedings.mlr.press/v70/sen17a/sen17a.pdf", "supp": "", "pdf_size": 1477000, "gs_citation": 94, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17995648532112582656&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "The University of Texas at Austin; IBM Thomas J. Watson Research Center; The University of Texas at Austin; The University of Texas at Austin", "aff_domain": "utexas.edu; ; ; ", "email": "utexas.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/sen17a.html", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "University of Texas at Austin;IBM", "aff_unique_dep": ";Research", "aff_unique_url": "https://www.utexas.edu;https://www.ibm.com/research", "aff_unique_abbr": "UT Austin;IBM", "aff_campus_unique_index": "0;1;0;0", "aff_campus_unique": "Austin;Yorktown Heights", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Image-to-Markup Generation with Coarse-to-Fine Attention", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/498", "id": "498", "author_site": "Yuntian Deng, Anssi Kanervisto, Jeffrey Ling, Alexander Rush", "author": "Yuntian Deng; Anssi Kanervisto; Jeffrey Ling; Alexander M. Rush", "abstract": "We present a neural encoder-decoder model to convert images into presentational markup based on a scalable coarse-to-fine attention mechanism. Our method is evaluated in the context of image-to-LaTeX generation, and we introduce a new dataset of real-world rendered mathematical expressions paired with LaTeX markup. We show that unlike neural OCR techniques using CTC-based models, attention-based approaches can tackle this non-standard OCR task. Our approach outperforms classical mathematical OCR systems by a large margin on in-domain rendered data, and, with pretraining, also performs well on out-of-domain handwritten data. 
To reduce the inference complexity associated with the attention-based approaches, we introduce a new coarse-to-fine attention layer that selects a support region before applying attention.", "bibtex": "@InProceedings{pmlr-v70-deng17a,\n title = \t {Image-to-Markup Generation with Coarse-to-Fine Attention},\n author = {Yuntian Deng and Anssi Kanervisto and Jeffrey Ling and Alexander M. Rush},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {980--989},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/deng17a/deng17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/deng17a.html},\n abstract = \t {We present a neural encoder-decoder model to convert images into presentational markup based on a scalable coarse-to-fine attention mechanism. Our method is evaluated in the context of image-to-LaTeX generation, and we introduce a new dataset of real-world rendered mathematical expressions paired with LaTeX markup. We show that unlike neural OCR techniques using CTC-based models, attention-based approaches can tackle this non-standard OCR task. Our approach outperforms classical mathematical OCR systems by a large margin on in-domain rendered data, and, with pretraining, also performs well on out-of-domain handwritten data. To reduce the inference complexity associated with the attention-based approaches, we introduce a new coarse-to-fine attention layer that selects a support region before applying attention.}\n}", "pdf": "http://proceedings.mlr.press/v70/deng17a/deng17a.pdf", "supp": "", "pdf_size": 596450, "gs_citation": 301, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11132496968334497350&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Harvard University; University of Eastern Finland; Harvard University; Harvard University", "aff_domain": "seas.harvard.edu; ; ; ", "email": "seas.harvard.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/deng17a.html", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "Harvard University;University of Eastern Finland", "aff_unique_dep": ";", "aff_unique_url": "https://www.harvard.edu;https://www.uef.fi", "aff_unique_abbr": "Harvard;UEF", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0;0", "aff_country_unique": "United States;Finland" }, { "title": "Improved Variational Autoencoders for Text Modeling using Dilated Convolutions", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/834", "id": "834", "author_site": "Zichao Yang, Zhiting Hu, Ruslan Salakhutdinov, Taylor Berg-Kirkpatrick", "author": "Zichao Yang; Zhiting Hu; Ruslan Salakhutdinov; Taylor Berg-Kirkpatrick", "abstract": "Recent work on generative text modeling has found that variational autoencoders (VAE) with LSTM decoders perform worse than simpler LSTM language models (Bowman et al., 2015). This negative result is so far poorly understood, but has been attributed to the propensity of LSTM decoders to ignore conditioning information from the encoder. In this paper, we experiment with a new type of decoder for VAE: a dilated CNN. By changing the decoder\u2019s dilation architecture, we control the size of context from previously generated words. 
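Aside, keyed to the "Improved Variational Autoencoders for Text Modeling using Dilated Convolutions" entry above: a back-of-the-envelope helper for the one quantity the abstract says is being controlled, the left context (receptive field) of a stack of causal dilated convolutions; the layer configurations are hypothetical and this is not the authors' decoder code.

```python
# Back-of-the-envelope helper keyed to the dilated-CNN decoder entry above (not the
# authors' code): the left context (receptive field, in tokens) of a stack of causal
# convolutions is 1 + sum over layers of (kernel_size - 1) * dilation.
def causal_receptive_field(kernel_size, dilations):
    return 1 + sum((kernel_size - 1) * d for d in dilations)

# Hypothetical decoder configurations: deeper / more dilated means more context.
for dilations in ([1, 1, 1], [1, 2, 4], [1, 2, 4, 8, 16]):
    print(dilations, "->", causal_receptive_field(kernel_size=3, dilations=dilations), "tokens of context")
```

A shallow, barely dilated stack gives the decoder little history and forces it to rely on the latent code, while a deep, strongly dilated stack approaches an LSTM-like full-history decoder; the trade-off the entry describes lives between these extremes.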
In experiments, we find that there is a trade-off between contextual capacity of the decoder and effective use of encoding information. We show that when carefully managed, VAEs can outperform LSTM language models. We demonstrate perplexity gains on two datasets, representing the first positive language modeling result with VAE. Further, we conduct an in-depth investigation of the use of VAE (with our new decoding architecture) for semi-supervised and unsupervised labeling tasks, demonstrating gains over several strong baselines.", "bibtex": "@InProceedings{pmlr-v70-yang17d,\n title = \t {Improved Variational Autoencoders for Text Modeling using Dilated Convolutions},\n author = {Zichao Yang and Zhiting Hu and Ruslan Salakhutdinov and Taylor Berg-Kirkpatrick},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3881--3890},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/yang17d/yang17d.pdf},\n url = \t {https://proceedings.mlr.press/v70/yang17d.html},\n abstract = \t {Recent work on generative text modeling has found that variational autoencoders (VAE) with LSTM decoders perform worse than simpler LSTM language models (Bowman et al., 2015). This negative result is so far poorly understood, but has been attributed to the propensity of LSTM decoders to ignore conditioning information from the encoder. In this paper, we experiment with a new type of decoder for VAE: a dilated CNN. By changing the decoder\u2019s dilation architecture, we control the size of context from previously generated words. In experiments, we find that there is a trade-off between contextual capacity of the decoder and effective use of encoding information. We show that when carefully managed, VAEs can outperform LSTM language models. We demonstrate perplexity gains on two datasets, representing the first positive language modeling result with VAE. 
Further, we conduct an in-depth investigation of the use of VAE (with our new decoding architecture) for semi-supervised and unsupervised labeling tasks, demonstrating gains over several strong baselines.}\n}", "pdf": "http://proceedings.mlr.press/v70/yang17d/yang17d.pdf", "supp": "", "pdf_size": 575968, "gs_citation": 485, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=123823476347966018&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Carnegie Mellon University; Carnegie Mellon University; Carnegie Mellon University; Carnegie Mellon University", "aff_domain": "cs.cmu.edu; ; ; ", "email": "cs.cmu.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/yang17d.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Improving Gibbs Sampler Scan Quality with DoGS", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/765", "id": "765", "author_site": "Ioannis Mitliagkas, Lester Mackey", "author": "Ioannis Mitliagkas; Lester Mackey", "abstract": "The pairwise influence matrix of Dobrushin has long been used as an analytical tool to bound the rate of convergence of Gibbs sampling. In this work, we use Dobrushin influence as the basis of a practical tool to certify and efficiently improve the quality of a Gibbs sampler. Our Dobrushin-optimized Gibbs samplers (DoGS) offer customized variable selection orders for a given sampling budget and variable subset of interest, explicit bounds on total variation distance to stationarity, and certifiable improvements over the standard systematic and uniform random scan Gibbs samplers. In our experiments with image segmentation, Markov chain Monte Carlo maximum likelihood estimation, and Ising model inference, DoGS consistently deliver higher-quality inferences with significantly smaller sampling budgets than standard Gibbs samplers.", "bibtex": "@InProceedings{pmlr-v70-mitliagkas17a,\n title = \t {Improving {G}ibbs Sampler Scan Quality with {D}o{GS}},\n author = {Ioannis Mitliagkas and Lester Mackey},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2469--2477},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/mitliagkas17a/mitliagkas17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/mitliagkas17a.html},\n abstract = \t {The pairwise influence matrix of Dobrushin has long been used as an analytical tool to bound the rate of convergence of Gibbs sampling. In this work, we use Dobrushin influence as the basis of a practical tool to certify and efficiently improve the quality of a Gibbs sampler. Our Dobrushin-optimized Gibbs samplers (DoGS) offer customized variable selection orders for a given sampling budget and variable subset of interest, explicit bounds on total variation distance to stationarity, and certifiable improvements over the standard systematic and uniform random scan Gibbs samplers. 
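Aside, keyed to the "Improving Gibbs Sampler Scan Quality with DoGS" entry above: a minimal Ising-model Gibbs sampler in which the scan (the order in which variables are visited) is an explicit argument, since the scan is the object DoGS customizes; the Dobrushin-influence-based scan selection itself is not reproduced here, and the grid size, coupling and scans are hypothetical.

```python
# Minimal sketch keyed to the DoGS entry above (not the authors' code): a standard
# Gibbs sampler for a small Ising model where the scan is an explicit argument.
import numpy as np

def gibbs(scan, J=0.4, n=8, sweeps=5, seed=0):
    rng = np.random.default_rng(seed)
    x = rng.choice([-1, 1], size=(n, n))
    for _ in range(sweeps):
        for (i, j) in scan:
            # Sum of the four neighbours with periodic boundary (hypothetical choice).
            s = x[(i - 1) % n, j] + x[(i + 1) % n, j] + x[i, (j - 1) % n] + x[i, (j + 1) % n]
            p_plus = 1.0 / (1.0 + np.exp(-2.0 * J * s))   # P(x_ij = +1 | neighbours)
            x[i, j] = 1 if rng.random() < p_plus else -1
    return x

n = 8
systematic = [(i, j) for i in range(n) for j in range(n)]            # raster scan
rng = np.random.default_rng(1)
random_scan = [tuple(rng.integers(0, n, size=2)) for _ in range(n * n)]
print("mean spin, systematic scan:", gibbs(systematic).mean())
print("mean spin, random scan:    ", gibbs(random_scan).mean())
```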
In our experiments with image segmentation, Markov chain Monte Carlo maximum likelihood estimation, and Ising model inference, DoGS consistently deliver higher-quality inferences with significantly smaller sampling budgets than standard Gibbs samplers.}\n}", "pdf": "http://proceedings.mlr.press/v70/mitliagkas17a/mitliagkas17a.pdf", "supp": "", "pdf_size": 2669508, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2829055521871341127&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, Stanford University, Stanford, CA 94305 USA; Microsoft Research New England, One Memorial Drive, Cambridge, MA 02142 USA", "aff_domain": "stanford.edu;microsoft.com", "email": "stanford.edu;microsoft.com", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/mitliagkas17a.html", "aff_unique_index": "0;1", "aff_unique_norm": "Stanford University;Microsoft", "aff_unique_dep": "Department of Computer Science;Microsoft Research New England", "aff_unique_url": "https://www.stanford.edu;https://www.microsoft.com/en-us/research/group/new-england", "aff_unique_abbr": "Stanford;MSR NE", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Stanford;Cambridge", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Improving Stochastic Policy Gradients in Continuous Control with Deep Reinforcement Learning using the Beta Distribution", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/733", "id": "733", "author_site": "Po-Wei Chou, Daniel Maturana, Sebastian Scherer", "author": "Po-Wei Chou; Daniel Maturana; Sebastian Scherer", "abstract": "Recently, reinforcement learning with deep neural networks has achieved great success in challenging continuous control problems such as 3D locomotion and robotic manipulation. However, in real-world control problems, the actions one can take are bounded by physical constraints, which introduces a bias when the standard Gaussian distribution is used as the stochastic policy. In this work, we propose to use the Beta distribution as an alternative and analyze the bias and variance of the policy gradients of both policies. We show that the Beta policy is bias-free and provides significantly faster convergence and higher scores over the Gaussian policy when both are used with trust region policy optimization (TRPO) and actor critic with experience replay (ACER), the state-of-the-art on- and off-policy stochastic methods respectively, on OpenAI Gym\u2019s and MuJoCo\u2019s continuous control environments.", "bibtex": "@InProceedings{pmlr-v70-chou17a,\n title = \t {Improving Stochastic Policy Gradients in Continuous Control with Deep Reinforcement Learning using the Beta Distribution},\n author = {Po-Wei Chou and Daniel Maturana and Sebastian Scherer},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {834--843},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/chou17a/chou17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/chou17a.html},\n abstract = \t {Recently, reinforcement learning with deep neural networks has achieved great success in challenging continuous control problems such as 3D locomotion and robotic manipulation. 
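Aside, keyed to the "Improving Stochastic Policy Gradients in Continuous Control with Deep Reinforcement Learning using the Beta Distribution" entry above: a minimal helper showing a Beta policy over a bounded action interval and its log-density (the quantity a policy-gradient method differentiates); it is not the TRPO/ACER training code, and the bounds and parameters are hypothetical.

```python
# Minimal helper keyed to the Beta-policy entry above (not the authors' training code):
# a Beta(alpha, beta) policy over a bounded action interval [low, high]. Unlike a
# Gaussian, every sample lies inside the bounds, so no clipping bias is introduced.
import math, random

def sample_action(alpha, beta, low, high, rng=random):
    u = rng.betavariate(alpha, beta)           # u in (0, 1)
    return low + (high - low) * u

def log_prob(action, alpha, beta, low, high):
    """Log-density of the scaled Beta policy at `action`."""
    u = (action - low) / (high - low)
    log_B = math.lgamma(alpha) + math.lgamma(beta) - math.lgamma(alpha + beta)
    return ((alpha - 1) * math.log(u) + (beta - 1) * math.log(1 - u)
            - log_B - math.log(high - low))    # change-of-variables term for the rescaling

low, high = -2.0, 2.0                           # hypothetical torque limits
a = sample_action(alpha=3.0, beta=1.5, low=low, high=high)
print(a, log_prob(a, 3.0, 1.5, low, high))
```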
However, in real-world control problems, the actions one can take are bounded by physical constraints, which introduces a bias when the standard Gaussian distribution is used as the stochastic policy. In this work, we propose to use the Beta distribution as an alternative and analyze the bias and variance of the policy gradients of both policies. We show that the Beta policy is bias-free and provides significantly faster convergence and higher scores over the Gaussian policy when both are used with trust region policy optimization (TRPO) and actor critic with experience replay (ACER), the state-of-the-art on- and off-policy stochastic methods respectively, on OpenAI Gym\u2019s and MuJoCo\u2019s continuous control environments.}\n}", "pdf": "http://proceedings.mlr.press/v70/chou17a/chou17a.pdf", "supp": "", "pdf_size": 1261219, "gs_citation": 241, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1304094401412603023&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Robotics Institute, Carnegie Mellon University, USA; Robotics Institute, Carnegie Mellon University, USA; Robotics Institute, Carnegie Mellon University, USA", "aff_domain": "andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu", "email": "andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/chou17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "Robotics Institute", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Improving Viterbi is Hard: Better Runtimes Imply Faster Clique Algorithms", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/728", "id": "728", "author_site": "Arturs Backurs, Christos Tzamos", "author": "Arturs Backurs; Christos Tzamos", "abstract": "The classic algorithm of Viterbi computes the most likely path in a Hidden Markov Model (HMM) that results in a given sequence of observations. It runs in time $O(Tn^2)$ given a sequence of T observations from a HMM with n states. Despite significant interest in the problem and prolonged effort by different communities, no known algorithm achieves more than a polylogarithmic speedup. In this paper, we explain this difficulty by providing matching conditional lower bounds. Our lower bounds are based on assumptions that the best known algorithms for the All-Pairs Shortest Paths problem (APSP) and for the Max-Weight k-Clique problem in edge-weighted graphs are essentially tight. 
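Aside, keyed to the "Improving Viterbi is Hard" entry above: the textbook O(Tn^2) dynamic program whose fine-grained hardness the entry studies, written in log-space; a standard implementation, not the paper's, over a hypothetical toy HMM.

```python
# The textbook O(T n^2) Viterbi dynamic program whose hardness the entry above studies
# (a standard log-space implementation, not the paper's; the toy HMM is hypothetical).
import numpy as np

def viterbi(log_pi, log_A, log_B, obs):
    """Most likely state path for observations `obs` under an HMM with n states.
    log_pi: (n,) initial, log_A: (n, n) transitions, log_B: (n, m) emissions."""
    n, T = len(log_pi), len(obs)
    score = log_pi + log_B[:, obs[0]]
    back = np.zeros((T, n), dtype=int)
    for t in range(1, T):                        # each step is the O(n^2) max-plus product
        cand = score[:, None] + log_A            # cand[i, j] = score[i] + log_A[i, j]
        back[t] = np.argmax(cand, axis=0)
        score = cand[back[t], np.arange(n)] + log_B[:, obs[t]]
    path = [int(np.argmax(score))]
    for t in range(T - 1, 0, -1):
        path.append(int(back[t, path[-1]]))
    return path[::-1]

log = np.log
pi = log(np.array([0.6, 0.4]))
A = log(np.array([[0.7, 0.3], [0.4, 0.6]]))
B = log(np.array([[0.5, 0.4, 0.1], [0.1, 0.3, 0.6]]))
print(viterbi(pi, A, B, obs=[0, 1, 2, 2]))       # -> [0, 0, 1, 1]
```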
Finally, using a recent algorithm by Green Larsen and Williams for online Boolean matrix-vector multiplication, we get a $2^{\\Omega(\\sqrt{\\log n})}$ speedup for the Viterbi algorithm when there are few distinct transition probabilities in the HMM.", "bibtex": "@InProceedings{pmlr-v70-backurs17a,\n title = \t {Improving {V}iterbi is Hard: Better Runtimes Imply Faster Clique Algorithms},\n author = {Arturs Backurs and Christos Tzamos},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {311--321},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/backurs17a/backurs17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/backurs17a.html},\n abstract = \t {The classic algorithm of Viterbi computes the most likely path in a Hidden Markov Model (HMM) that results in a given sequence of observations. It runs in time $O(Tn^2)$ given a sequence of T observations from a HMM with n states. Despite significant interest in the problem and prolonged effort by different communities, no known algorithm achieves more than a polylogarithmic speedup. In this paper, we explain this difficulty by providing matching conditional lower bounds. Our lower bounds are based on assumptions that the best known algorithms for the All-Pairs Shortest Paths problem (APSP) and for the Max-Weight k-Clique problem in edge-weighted graphs are essentially tight. Finally, using a recent algorithm by Green Larsen and Williams for online Boolean matrix-vector multiplication, we get a $2^{\\Omega(\\sqrt{\\log n})}$ speedup for the Viterbi algorithm when there are few distinct transition probabilities in the HMM.}\n}", "pdf": "http://proceedings.mlr.press/v70/backurs17a/backurs17a.pdf", "supp": "", "pdf_size": 335960, "gs_citation": 58, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6610415580592082000&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "MIT, US; MIT, US", "aff_domain": "mit.edu;mit.edu", "email": "mit.edu;mit.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/backurs17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "", "aff_unique_url": "https://web.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Innovation Pursuit: A New Approach to the Subspace Clustering Problem", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/579", "id": "579", "author_site": "Mostafa Rahmani, George Atia", "author": "Mostafa Rahmani; George Atia", "abstract": "This paper presents a new scalable approach, termed Innovation Pursuit (iPursuit), to the problem of subspace clustering. iPursuit rests on a new geometrical idea whereby each subspace is identified based on its novelty with respect to the other subspaces. The subspaces are identified consecutively by solving a series of simple linear optimization problems, each searching for a direction of innovation in the span of the data. A detailed mathematical analysis is provided establishing sufficient conditions for the proposed approach to correctly cluster the data points. 
Moreover, the proposed direction search approach can be integrated with spectral clustering to yield a new variant of spectral-clustering-based algorithms. Remarkably, the proposed approach can provably yield exact clustering even when the subspaces have significant intersections. The numerical simulations demonstrate that iPursuit can often outperform the state-of-the-art subspace clustering algorithms \u2013 more so for subspaces with significant intersections \u2013 along with substantial reductions in computational complexity.", "bibtex": "@InProceedings{pmlr-v70-rahmani17b,\n title = \t {Innovation Pursuit: A New Approach to the Subspace Clustering Problem},\n author = {Mostafa Rahmani and George Atia},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2874--2882},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/rahmani17b/rahmani17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/rahmani17b.html},\n abstract = \t {This paper presents a new scalable approach, termed Innovation Pursuit (iPursuit), to the problem of subspace clustering. iPursuit rests on a new geometrical idea whereby each subspace is identified based on its novelty with respect to the other subspaces. The subspaces are identified consecutively by solving a series of simple linear optimization problems, each searching for a direction of innovation in the span of the data. A detailed mathematical analysis is provided establishing sufficient conditions for the proposed approach to correctly cluster the data points. Moreover, the proposed direction search approach can be integrated with spectral clustering to yield a new variant of spectral-clustering-based algorithms. Remarkably, the proposed approach can provably yield exact clustering even when the subspaces have significant intersections. The numerical simulations demonstrate that iPursuit can often outperform the state-of-the-art subspace clustering algorithms \u2013 more so for subspaces with significant intersections \u2013 along with substantial reductions in computational complexity.}\n}", "pdf": "http://proceedings.mlr.press/v70/rahmani17b/rahmani17b.pdf", "supp": "", "pdf_size": 1083351, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4104690746799122678&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 5, "aff": "University of Central Florida, Orlando, Florida, USA; University of Central Florida, Orlando, Florida, USA", "aff_domain": "knights.ucf.edu; ", "email": "knights.ucf.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/rahmani17b.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Central Florida", "aff_unique_dep": "", "aff_unique_url": "https://www.ucf.edu", "aff_unique_abbr": "UCF", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Orlando", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Input Convex Neural Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/835", "id": "835", "author_site": "Brandon Amos, Lei Xu, Zico Kolter", "author": "Brandon Amos; Lei Xu; J. Zico Kolter", "abstract": "This paper presents the input convex neural network architecture. 
These are scalar-valued (potentially deep) neural networks with constraints on the network parameters such that the output of the network is a convex function of (some of) the inputs. The networks allow for efficient inference via optimization over some inputs to the network given others, and can be applied to settings including structured prediction, data imputation, reinforcement learning, and others. In this paper we lay the basic groundwork for these models, proposing methods for inference, optimization and learning, and analyze their representational power. We show that many existing neural network architectures can be made input-convex with a minor modification, and develop specialized optimization algorithms tailored to this setting. Finally, we highlight the performance of the methods on multi-label prediction, image completion, and reinforcement learning problems, where we show improvement over the existing state of the art in many cases.", "bibtex": "@InProceedings{pmlr-v70-amos17b,\n title = \t {Input Convex Neural Networks},\n author = {Brandon Amos and Lei Xu and J. Zico Kolter},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {146--155},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/amos17b/amos17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/amos17b.html},\n abstract = \t {This paper presents the input convex neural network architecture. These are scalar-valued (potentially deep) neural networks with constraints on the network parameters such that the output of the network is a convex function of (some of) the inputs. The networks allow for efficient inference via optimization over some inputs to the network given others, and can be applied to settings including structured prediction, data imputation, reinforcement learning, and others. In this paper we lay the basic groundwork for these models, proposing methods for inference, optimization and learning, and analyze their representational power. We show that many existing neural network architectures can be made input-convex with a minor modification, and develop specialized optimization algorithms tailored to this setting. 
Finally, we highlight the performance of the methods on multi-label prediction, image completion, and reinforcement learning problems, where we show improvement over the existing state of the art in many cases.}\n}", "pdf": "http://proceedings.mlr.press/v70/amos17b/amos17b.pdf", "supp": "", "pdf_size": 412672, "gs_citation": 858, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13044231284144961282&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "School of Computer Science, Carnegie Mellon University; Department of Computer Science and Technology, Tsinghua University; School of Computer Science, Carnegie Mellon University", "aff_domain": "cs.cmu.edu; ;cs.cmu.edu", "email": "cs.cmu.edu; ;cs.cmu.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/amos17b.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Carnegie Mellon University;Tsinghua University", "aff_unique_dep": "School of Computer Science;Department of Computer Science and Technology", "aff_unique_url": "https://www.cmu.edu;https://www.tsinghua.edu.cn", "aff_unique_abbr": "CMU;THU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Pittsburgh;", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United States;China" }, { "title": "Input Switched Affine Networks: An RNN Architecture Designed for Interpretability", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/759", "id": "759", "author_site": "Jakob Foerster, Justin Gilmer, Jan Chorowski, Jascha Sohl-Dickstein, David Sussillo", "author": "Jakob N. Foerster; Justin Gilmer; Jascha Sohl-Dickstein; Jan Chorowski; David Sussillo", "abstract": "There exist many problem domains where the interpretability of neural network models is essential for deployment. Here we introduce a recurrent architecture composed of input-switched affine transformations \u2013 in other words an RNN without any explicit nonlinearities, but with input-dependent recurrent weights. This simple form allows the RNN to be analyzed via straightforward linear methods: we can exactly characterize the linear contribution of each input to the model predictions; we can use a change-of-basis to disentangle input, output, and computational hidden unit subspaces; we can fully reverse-engineer the architecture\u2019s solution to a simple task. Despite this ease of interpretation, the input switched affine network achieves reasonable performance on a text modeling tasks, and allows greater computational efficiency than networks with standard nonlinearities.", "bibtex": "@InProceedings{pmlr-v70-foerster17a,\n title = \t {Input Switched Affine Networks: An {RNN} Architecture Designed for Interpretability},\n author = {Jakob N. Foerster and Justin Gilmer and Jascha Sohl-Dickstein and Jan Chorowski and David Sussillo},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1136--1145},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/foerster17a/foerster17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/foerster17a.html},\n abstract = \t {There exist many problem domains where the interpretability of neural network models is essential for deployment. 
Here we introduce a recurrent architecture composed of input-switched affine transformations \u2013 in other words an RNN without any explicit nonlinearities, but with input-dependent recurrent weights. This simple form allows the RNN to be analyzed via straightforward linear methods: we can exactly characterize the linear contribution of each input to the model predictions; we can use a change-of-basis to disentangle input, output, and computational hidden unit subspaces; we can fully reverse-engineer the architecture\u2019s solution to a simple task. Despite this ease of interpretation, the input switched affine network achieves reasonable performance on a text modeling tasks, and allows greater computational efficiency than networks with standard nonlinearities.}\n}", "pdf": "http://proceedings.mlr.press/v70/foerster17a/foerster17a.pdf", "supp": "", "pdf_size": 1559109, "gs_citation": 41, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2619068167690577918&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Google Brain, Mountain View, CA, USA+1; Google Brain, Mountain View, CA, USA+2; Google Brain, Mountain View, CA, USA; Google Brain, Mountain View, CA, USA+4; Google Brain, Mountain View, CA, USA", "aff_domain": "cs.ox.ac.uk; ; ; ;google.com", "email": "cs.ox.ac.uk; ; ; ;google.com", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/foerster17a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Google;", "aff_unique_dep": "Google Brain;", "aff_unique_url": "https://brain.google.com;", "aff_unique_abbr": "Google Brain;", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Mountain View;", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States;" }, { "title": "Interactive Learning from Policy-Dependent Human Feedback", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/846", "id": "846", "author_site": "James MacGlashan, Mark Ho, Robert Loftin, Bei Peng, Guan Wang, David L Roberts, Matthew E. Taylor, Michael L. Littman", "author": "James MacGlashan; Mark K. Ho; Robert Loftin; Bei Peng; Guan Wang; David L. Roberts; Matthew E. Taylor; Michael L. Littman", "abstract": "This paper investigates the problem of interactively learning behaviors communicated by a human teacher using positive and negative feedback. Much previous work on this problem has made the assumption that people provide feedback for decisions that is dependent on the behavior they are teaching and is independent from the learner\u2019s current policy. We present empirical results that show this assumption to be false\u2014whether human trainers give a positive or negative feedback for a decision is influenced by the learner\u2019s current policy. Based on this insight, we introduce Convergent Actor-Critic by Humans (COACH), an algorithm for learning from policy-dependent feedback that converges to a local optimum. Finally, we demonstrate that COACH can successfully learn multiple behaviors on a physical robot.", "bibtex": "@InProceedings{pmlr-v70-macglashan17a,\n title = \t {Interactive Learning from Policy-Dependent Human Feedback},\n author = {James MacGlashan and Mark K. Ho and Robert Loftin and Bei Peng and Guan Wang and David L. Roberts and Matthew E. Taylor and Michael L. 
Littman},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2285--2294},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/macglashan17a/macglashan17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/macglashan17a.html},\n abstract = \t {This paper investigates the problem of interactively learning behaviors communicated by a human teacher using positive and negative feedback. Much previous work on this problem has made the assumption that people provide feedback for decisions that is dependent on the behavior they are teaching and is independent from the learner\u2019s current policy. We present empirical results that show this assumption to be false\u2014whether human trainers give a positive or negative feedback for a decision is influenced by the learner\u2019s current policy. Based on this insight, we introduce Convergent Actor-Critic by Humans (COACH), an algorithm for learning from policy-dependent feedback that converges to a local optimum. Finally, we demonstrate that COACH can successfully learn multiple behaviors on a physical robot.}\n}", "pdf": "http://proceedings.mlr.press/v70/macglashan17a/macglashan17a.pdf", "supp": "", "pdf_size": 410757, "gs_citation": 387, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7479379499615521862&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Cogitai; Brown University; North Carolina State University; Washington State University; Brown University; North Carolina State University; Washington State University; Brown University", "aff_domain": "cogitai.com; ; ; ; ; ; ; ", "email": "cogitai.com; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v70/macglashan17a.html", "aff_unique_index": "0;1;2;3;1;2;3;1", "aff_unique_norm": "Cogitai;Brown University;North Carolina State University;Washington State University", "aff_unique_dep": ";;;", "aff_unique_url": "https://www.cogitai.com;https://www.brown.edu;https://www.ncsu.edu;https://wsu.edu", "aff_unique_abbr": ";Brown;NCSU;WSU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Iterative Machine Teaching", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/858", "id": "858", "author_site": "Weiyang Liu, Bo Dai, Ahmad Humayun, Charlene Tay, Chen Yu, Linda Smith, James Rehg, Le Song", "author": "Weiyang Liu; Bo Dai; Ahmad Humayun; Charlene Tay; Chen Yu; Linda B. Smith; James M. Rehg; Le Song", "abstract": "In this paper, we consider the problem of machine teaching, the inverse problem of machine learning. Different from traditional machine teaching which views the learners as batch algorithms, we study a new paradigm where the learner uses an iterative algorithm and a teacher can feed examples sequentially and intelligently based on the current performance of the learner. We show that the teaching complexity in the iterative case is very different from that in the batch case. Instead of constructing a minimal training set for learners, our iterative machine teaching focuses on achieving fast convergence in the learner model.
Depending on the level of information the teacher has from the learner model, we design teaching algorithms which can provably reduce the number of teaching examples and achieve faster convergence than learning without teachers. We also validate our theoretical findings with extensive experiments on different data distribution and real image datasets.", "bibtex": "@InProceedings{pmlr-v70-liu17b,\n title = \t {Iterative Machine Teaching},\n author = {Weiyang Liu and Bo Dai and Ahmad Humayun and Charlene Tay and Chen Yu and Linda B. Smith and James M. Rehg and Le Song},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2149--2158},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/liu17b/liu17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/liu17b.html},\n abstract = \t {In this paper, we consider the problem of machine teaching, the inverse problem of machine learning. Different from traditional machine teaching which views the learners as batch algorithms, we study a new paradigm where the learner uses an iterative algorithm and a teacher can feed examples sequentially and intelligently based on the current performance of the learner. We show that the teaching complexity in the iterative case is very different from that in the batch case. Instead of constructing a minimal training set for learners, our iterative machine teaching focuses on achieving fast convergence in the learner model. Depending on the level of information the teacher has from the learner model, we design teaching algorithms which can provably reduce the number of teaching examples and achieve faster convergence than learning without teachers. We also validate our theoretical findings with extensive experiments on different data distribution and real image datasets.}\n}", "pdf": "http://proceedings.mlr.press/v70/liu17b/liu17b.pdf", "supp": "", "pdf_size": 3446948, "gs_citation": 175, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10044410972786814018&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": ";;;;;;;", "aff_domain": ";;;;;;;", "email": ";;;;;;;", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v70/liu17b.html" }, { "title": "Joint Dimensionality Reduction and Metric Learning: A Geometric Take", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/561", "id": "561", "author_site": "Mehrtash Harandi, Mathieu Salzmann, Richard I Hartley", "author": "Mehrtash Harandi; Mathieu Salzmann; Richard Hartley", "abstract": "To be tractable and robust to data noise, existing metric learning algorithms commonly rely on PCA as a pre-processing step. How can we know, however, that PCA, or any other specific dimensionality reduction technique, is the method of choice for the problem at hand? The answer is simple: We cannot! To address this issue, in this paper, we develop a Riemannian framework to jointly learn a mapping performing dimensionality reduction and a metric in the induced space. 
Our experiments evidence that, while we directly work on high-dimensional features, our approach yields competitive runtimes with and higher accuracy than state-of-the-art metric learning algorithms.", "bibtex": "@InProceedings{pmlr-v70-harandi17a,\n title = \t {Joint Dimensionality Reduction and Metric Learning: A Geometric Take},\n author = {Mehrtash Harandi and Mathieu Salzmann and Richard Hartley},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1404--1413},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/harandi17a/harandi17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/harandi17a.html},\n abstract = \t {To be tractable and robust to data noise, existing metric learning algorithms commonly rely on PCA as a pre-processing step. How can we know, however, that PCA, or any other specific dimensionality reduction technique, is the method of choice for the problem at hand? The answer is simple: We cannot! To address this issue, in this paper, we develop a Riemannian framework to jointly learn a mapping performing dimensionality reduction and a metric in the induced space. Our experiments evidence that, while we directly work on high-dimensional features, our approach yields competitive runtimes with and higher accuracy than state-of-the-art metric learning algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v70/harandi17a/harandi17a.pdf", "supp": "", "pdf_size": 608857, "gs_citation": 80, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1943021562754188889&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Data61, CSIRO, Canberra, Australia+Australian National University, Canberra, Australia; CVLab, EPFL, Switzerland; Australian National University, Canberra, Australia", "aff_domain": "anu.edu.au; ; ", "email": "anu.edu.au; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/harandi17a.html", "aff_unique_index": "0+1;2;1", "aff_unique_norm": "CSIRO;Australian National University;EPFL", "aff_unique_dep": "Data61;;CVLab", "aff_unique_url": "https://www.csiro.au;https://www.anu.edu.au;https://cvlab.epfl.ch", "aff_unique_abbr": "CSIRO;ANU;EPFL", "aff_campus_unique_index": "0+0;0", "aff_campus_unique": "Canberra;", "aff_country_unique_index": "0+0;1;0", "aff_country_unique": "Australia;Switzerland" }, { "title": "Just Sort It! A Simple and Effective Approach to Active Preference Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/592", "id": "592", "author_site": "Lucas Maystre, Matthias Grossglauser", "author": "Lucas Maystre; Matthias Grossglauser", "abstract": "We address the problem of learning a ranking by using adaptively chosen pairwise comparisons. Our goal is to recover the ranking accurately but to sample the comparisons sparingly. If all comparison outcomes are consistent with the ranking, the optimal solution is to use an efficient sorting algorithm, such as Quicksort. But how do sorting algorithms behave if some comparison outcomes are inconsistent with the ranking? We give favorable guarantees for Quicksort for the popular Bradley-Terry model, under natural assumptions on the parameters. 
Furthermore, we empirically demonstrate that sorting algorithms lead to a very simple and effective active learning strategy: repeatedly sort the items. This strategy performs as well as state-of-the-art methods (and much better than random sampling) at a minuscule fraction of the computational cost.", "bibtex": "@InProceedings{pmlr-v70-maystre17a,\n title = \t {Just Sort It! {A} Simple and Effective Approach to Active Preference Learning},\n author = {Lucas Maystre and Matthias Grossglauser},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2344--2353},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/maystre17a/maystre17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/maystre17a.html},\n abstract = \t {We address the problem of learning a ranking by using adaptively chosen pairwise comparisons. Our goal is to recover the ranking accurately but to sample the comparisons sparingly. If all comparison outcomes are consistent with the ranking, the optimal solution is to use an efficient sorting algorithm, such as Quicksort. But how do sorting algorithms behave if some comparison outcomes are inconsistent with the ranking? We give favorable guarantees for Quicksort for the popular Bradley-Terry model, under natural assumptions on the parameters. Furthermore, we empirically demonstrate that sorting algorithms lead to a very simple and effective active learning strategy: repeatedly sort the items. This strategy performs as well as state-of-the-art methods (and much better than random sampling) at a minuscule fraction of the computational cost.}\n}", "pdf": "http://proceedings.mlr.press/v70/maystre17a/maystre17a.pdf", "supp": "", "pdf_size": 249866, "gs_citation": 72, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6547556421567403014&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "School of Computer and Communication Sciences, EPFL; School of Computer and Communication Sciences, EPFL", "aff_domain": "epfl.ch; ", "email": "epfl.ch; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/maystre17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Ecole Polytechnique Federale de Lausanne", "aff_unique_dep": "School of Computer and Communication Sciences", "aff_unique_url": "https://www.epfl.ch", "aff_unique_abbr": "EPFL", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Switzerland" }, { "title": "Kernelized Support Tensor Machines", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/516", "id": "516", "author_site": "Lifang He, Chun-Ta Lu, Guixiang Ma, Shen Wang, Linlin Shen, Philip Yu, Ann Ragin", "author": "Lifang He; Chun-Ta Lu; Guixiang Ma; Shen Wang; Linlin Shen; Philip S. Yu; Ann B. Ragin", "abstract": "In the context of supervised tensor learning, preserving the structural information and exploiting the discriminative nonlinear relationships of tensor data are crucial for improving the performance of learning tasks. Based on tensor factorization theory and kernel methods, we propose a novel Kernelized Support Tensor Machine (KSTM) which integrates kernelized tensor factorization with maximum-margin criterion. 
Specifically, the kernelized factorization technique is introduced to approximate the tensor data in kernel space such that the complex nonlinear relationships within tensor data can be explored. Further, dual structural preserving kernels are devised to learn the nonlinear boundary between tensor data. As a result of joint optimization, the kernels obtained in KSTM exhibit better generalization power to discriminative analysis. The experimental results on real-world neuroimaging datasets show the superiority of KSTM over the state-of-the-art techniques.", "bibtex": "@InProceedings{pmlr-v70-he17a,\n title = \t {Kernelized Support Tensor Machines},\n author = {Lifang He and Chun-Ta Lu and Guixiang Ma and Shen Wang and Linlin Shen and Philip S. Yu and Ann B. Ragin},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1442--1451},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/he17a/he17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/he17a.html},\n abstract = \t {In the context of supervised tensor learning, preserving the structural information and exploiting the discriminative nonlinear relationships of tensor data are crucial for improving the performance of learning tasks. Based on tensor factorization theory and kernel methods, we propose a novel Kernelized Support Tensor Machine (KSTM) which integrates kernelized tensor factorization with maximum-margin criterion. Specifically, the kernelized factorization technique is introduced to approximate the tensor data in kernel space such that the complex nonlinear relationships within tensor data can be explored. Further, dual structural preserving kernels are devised to learn the nonlinear boundary between tensor data. As a result of joint optimization, the kernels obtained in KSTM exhibit better generalization power to discriminative analysis. 
The experimental results on real-world neuroimaging datasets show the superiority of KSTM over the state-of-the-art techniques.}\n}", "pdf": "http://proceedings.mlr.press/v70/he17a/he17a.pdf", "supp": "", "pdf_size": 903245, "gs_citation": 71, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=602778878980517371&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Department of Computer Science, University of Illinois at Chicago, Chicago, IL, USA; Department of Computer Science, University of Illinois at Chicago, Chicago, IL, USA; Department of Computer Science, University of Illinois at Chicago, Chicago, IL, USA; Department of Computer Science, University of Illinois at Chicago, Chicago, IL, USA; Institute for Computer Vision, Shenzhen University, Shenzhen, China; Institute for Data Science, Tsinghua University, Beijing, China + Department of Computer Science, University of Illinois at Chicago, Chicago, IL, USA; Department of Radiology, Northwestern University, Chicago, IL, USA", "aff_domain": "uic.edu;uic.edu;uic.edu;uic.edu;szu.edu.cn;uic.edu;northwestern.edu", "email": "uic.edu;uic.edu;uic.edu;uic.edu;szu.edu.cn;uic.edu;northwestern.edu", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v70/he17a.html", "aff_unique_index": "0;0;0;0;1;2+0;3", "aff_unique_norm": "University of Illinois at Chicago;Shenzhen University;Tsinghua University;Northwestern University", "aff_unique_dep": "Department of Computer Science;Institute for Computer Vision;Institute for Data Science;Department of Radiology", "aff_unique_url": "https://www.uic.edu;https://www.szu.edu.cn;https://www.tsinghua.edu.cn;https://www.northwestern.edu", "aff_unique_abbr": "UIC;;Tsinghua;NU", "aff_campus_unique_index": "0;0;0;0;1;2+0;0", "aff_campus_unique": "Chicago;Shenzhen;Beijing", "aff_country_unique_index": "0;0;0;0;1;1+0;0", "aff_country_unique": "United States;China" }, { "title": "Know-Evolve: Deep Temporal Reasoning for Dynamic Knowledge Graphs", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/882", "id": "882", "author_site": "Rakshit Trivedi, Hanjun Dai, Yichen Wang, Le Song", "author": "Rakshit Trivedi; Hanjun Dai; Yichen Wang; Le Song", "abstract": "The availability of large scale event data with time stamps has given rise to dynamically evolving knowledge graphs that contain temporal information for each edge. Reasoning over time in such dynamic knowledge graphs is not yet well understood. To this end, we present Know-Evolve, a novel deep evolutionary knowledge network that learns non-linearly evolving entity representations over time. The occurrence of a fact (edge) is modeled as a multivariate point process whose intensity function is modulated by the score for that fact computed based on the learned entity embeddings. We demonstrate significantly improved performance over various relational learning approaches on two large scale real-world datasets. 
Further, our method effectively predicts occurrence or recurrence time of a fact which is novel compared to prior reasoning approaches in multi-relational setting.", "bibtex": "@InProceedings{pmlr-v70-trivedi17a,\n title = \t {Know-Evolve: Deep Temporal Reasoning for Dynamic Knowledge Graphs},\n author = {Rakshit Trivedi and Hanjun Dai and Yichen Wang and Le Song},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3462--3471},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/trivedi17a/trivedi17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/trivedi17a.html},\n abstract = \t {The availability of large scale event data with time stamps has given rise to dynamically evolving knowledge graphs that contain temporal information for each edge. Reasoning over time in such dynamic knowledge graphs is not yet well understood. To this end, we present Know-Evolve, a novel deep evolutionary knowledge network that learns non-linearly evolving entity representations over time. The occurrence of a fact (edge) is modeled as a multivariate point process whose intensity function is modulated by the score for that fact computed based on the learned entity embeddings. We demonstrate significantly improved performance over various relational learning approaches on two large scale real-world datasets. Further, our method effectively predicts occurrence or recurrence time of a fact which is novel compared to prior reasoning approaches in multi-relational setting.}\n}", "pdf": "http://proceedings.mlr.press/v70/trivedi17a/trivedi17a.pdf", "supp": "", "pdf_size": 1409366, "gs_citation": 658, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4482301806882870821&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "College of Computing, Georgia Institute of Technology; College of Computing, Georgia Institute of Technology; College of Computing, Georgia Institute of Technology; College of Computing, Georgia Institute of Technology", "aff_domain": "gatech.edu; ; ;cc.gatech.edu", "email": "gatech.edu; ; ;cc.gatech.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/trivedi17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Georgia Institute of Technology", "aff_unique_dep": "College of Computing", "aff_unique_url": "https://www.gatech.edu", "aff_unique_abbr": "Georgia Tech", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Atlanta", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Language Modeling with Gated Convolutional Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/887", "id": "887", "author_site": "Yann Dauphin, Angela Fan, Michael Auli, David Grangier", "author": "Yann N. Dauphin; Angela Fan; Michael Auli; David Grangier", "abstract": "The pre-dominant approach to language modeling to date is based on recurrent neural networks. Their success on this task is often linked to their ability to capture unbounded context. In this paper we develop a finite context approach through stacked convolutions, which can be more efficient since they allow parallelization over sequential tokens. We propose a novel simplified gating mechanism that outperforms Oord et al. 
(2016) and investigate the impact of key architectural decisions. The proposed approach achieves state-of-the-art on the WikiText-103 benchmark, even though it features long-term dependencies, as well as competitive results on the Google Billion Words benchmark. Our model reduces the latency to score a sentence by an order of magnitude compared to a recurrent baseline. To our knowledge, this is the first time a non-recurrent approach is competitive with strong recurrent models on these large scale language tasks.", "bibtex": "@InProceedings{pmlr-v70-dauphin17a,\n title = \t {Language Modeling with Gated Convolutional Networks},\n author = {Yann N. Dauphin and Angela Fan and Michael Auli and David Grangier},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {933--941},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/dauphin17a/dauphin17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/dauphin17a.html},\n abstract = \t {The pre-dominant approach to language modeling to date is based on recurrent neural networks. Their success on this task is often linked to their ability to capture unbounded context. In this paper we develop a finite context approach through stacked convolutions, which can be more efficient since they allow parallelization over sequential tokens. We propose a novel simplified gating mechanism that outperforms Oord et al. (2016) and investigate the impact of key architectural decisions. The proposed approach achieves state-of-the-art on the WikiText-103 benchmark, even though it features long-term dependencies, as well as competitive results on the Google Billion Words benchmark. Our model reduces the latency to score a sentence by an order of magnitude compared to a recurrent baseline. To our knowledge, this is the first time a non-recurrent approach is competitive with strong recurrent models on these large scale language tasks.}\n}", "pdf": "http://proceedings.mlr.press/v70/dauphin17a/dauphin17a.pdf", "supp": "", "pdf_size": 775790, "gs_citation": 3245, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8077064900826453934&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Facebook AI Research; Facebook AI Research; Facebook AI Research; Facebook AI Research", "aff_domain": "fb.com; ; ; ", "email": "fb.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/dauphin17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Meta", "aff_unique_dep": "Facebook AI Research", "aff_unique_url": "https://research.facebook.com", "aff_unique_abbr": "FAIR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Large-Scale Evolution of Image Classifiers", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/634", "id": "634", "author_site": "Esteban Real, Sherry Moore, Andrew Selle, Saurabh Saxena, Yutaka Leon Suematsu, Jie Tan, Quoc Le, Alexey Kurakin", "author": "Esteban Real; Sherry Moore; Andrew Selle; Saurabh Saxena; Yutaka Leon Suematsu; Jie Tan; Quoc V. 
Le; Alexey Kurakin", "abstract": "Neural networks have proven effective at solving difficult problems but designing their architectures can be challenging, even for image classification problems alone. Our goal is to minimize human participation, so we employ evolutionary algorithms to discover such networks automatically. Despite significant computational requirements, we show that it is now possible to evolve models with accuracies within the range of those published in the last year. Specifically, we employ simple evolutionary techniques at unprecedented scales to discover models for the CIFAR-10 and CIFAR-100 datasets, starting from trivial initial conditions and reaching accuracies of 94.6\\% (95.6\\% for ensemble) and 77.0\\%, respectively. To do this, we use novel and intuitive mutation operators that navigate large search spaces; we stress that no human participation is required once evolution starts and that the output is a fully-trained model. Throughout this work, we place special emphasis on the repeatability of results, the variability in the outcomes and the computational requirements.", "bibtex": "@InProceedings{pmlr-v70-real17a,\n title = \t {Large-Scale Evolution of Image Classifiers},\n author = {Esteban Real and Sherry Moore and Andrew Selle and Saurabh Saxena and Yutaka Leon Suematsu and Jie Tan and Quoc V. Le and Alexey Kurakin},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2902--2911},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/real17a/real17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/real17a.html},\n abstract = \t {Neural networks have proven effective at solving difficult problems but designing their architectures can be challenging, even for image classification problems alone. Our goal is to minimize human participation, so we employ evolutionary algorithms to discover such networks automatically. Despite significant computational requirements, we show that it is now possible to evolve models with accuracies within the range of those published in the last year. Specifically, we employ simple evolutionary techniques at unprecedented scales to discover models for the CIFAR-10 and CIFAR-100 datasets, starting from trivial initial conditions and reaching accuracies of 94.6\\% (95.6\\% for ensemble) and 77.0\\%, respectively. To do this, we use novel and intuitive mutation operators that navigate large search spaces; we stress that no human participation is required once evolution starts and that the output is a fully-trained model. 
Throughout this work, we place special emphasis on the repeatability of results, the variability in the outcomes and the computational requirements.}\n}", "pdf": "http://proceedings.mlr.press/v70/real17a/real17a.pdf", "supp": "", "pdf_size": 3677331, "gs_citation": 2148, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2681013556507309683&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Google Brain, Mountain View, California, USA; Google Brain, Mountain View, California, USA; Google Brain, Mountain View, California, USA; Google Brain, Mountain View, California, USA; Google Research, Mountain View, California, USA; Google Brain, Mountain View, California, USA; Google Brain, Mountain View, California, USA; Google Brain, Mountain View, California, USA", "aff_domain": "google.com; ; ; ; ; ; ; ", "email": "google.com; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v70/real17a.html", "aff_unique_index": "0;0;0;0;0;0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Brain", "aff_unique_url": "https://brain.google.com", "aff_unique_abbr": "Google Brain", "aff_campus_unique_index": "0;0;0;0;0;0;0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Latent Feature Lasso", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/867", "id": "867", "author_site": "En-Hsu Yen, Wei-Cheng Lee, Sung-En Chang, Arun Suggala, Shou-De Lin, Pradeep Ravikumar", "author": "Ian En-Hsu Yen; Wei-Cheng Lee; Sung-En Chang; Arun Sai Suggala; Shou-De Lin; Pradeep Ravikumar", "abstract": "The latent feature model (LFM), proposed in (Griffiths \\& Ghahramani, 2005), but possibly with earlier origins, is a generalization of a mixture model, where each instance is generated not from a single latent class but from a combination of", "bibtex": "@InProceedings{pmlr-v70-yen17a,\n title = \t {Latent Feature Lasso},\n author = {Ian En-Hsu Yen and Wei-Cheng Lee and Sung-En Chang and Arun Sai Suggala and Shou-De Lin and Pradeep Ravikumar},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3949--3957},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/yen17a/yen17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/yen17a.html},\n abstract = \t {The latent feature model (LFM), proposed in (Griffiths \\& Ghahramani, 2005), but possibly with earlier origins, is a generalization of a mixture model, where each instance is generated not from a single latent class but from a combination of", "pdf": "http://proceedings.mlr.press/v70/yen17a/yen17a.pdf", "supp": "", "pdf_size": 440135, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14994679870791210999&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Carnegie Mellon University, U.S.A.; National Taiwan University, Taiwan; National Taiwan University, Taiwan; Carnegie Mellon University, U.S.A.; National Taiwan University, Taiwan; Carnegie Mellon University, U.S.A.", "aff_domain": "cs.cmu.edu; ; ; ; ; ", "email": "cs.cmu.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/yen17a.html", "aff_unique_index": "0;1;1;0;1;0", "aff_unique_norm": "Carnegie Mellon 
University;National Taiwan University", "aff_unique_dep": ";", "aff_unique_url": "https://www.cmu.edu;https://www.ntu.edu.tw", "aff_unique_abbr": "CMU;NTU", "aff_campus_unique_index": "1;1;1", "aff_campus_unique": ";Taiwan", "aff_country_unique_index": "0;1;1;0;1;0", "aff_country_unique": "United States;China" }, { "title": "Latent Intention Dialogue Models", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/746", "id": "746", "author_site": "Tsung-Hsien Wen, Yishu Miao, Phil Blunsom, Stephen J Young", "author": "Tsung-Hsien Wen; Yishu Miao; Phil Blunsom; Steve Young", "abstract": "Developing a dialogue agent that is capable of making autonomous decisions and communicating by natural language is one of the long-term goals of machine learning research. The traditional approaches either rely on hand-crafting a small state-action set for applying reinforcement learning that is not scalable or constructing deterministic models for learning dialogue sentences that fail to capture the conversational stochasticity. In this paper, however, we propose a Latent Intention Dialogue Model that employs a discrete latent variable to learn underlying dialogue intentions in the framework of neural variational inference. Additionally, in a goal-oriented dialogue scenario, the latent intentions can be interpreted as actions guiding the generation of machine responses, which can be further refined autonomously by reinforcement learning. The experiments demonstrate the effectiveness of discrete latent variable models on learning goal-oriented dialogues, and the results outperform the published benchmarks on both corpus-based evaluation and human evaluation.", "bibtex": "@InProceedings{pmlr-v70-wen17a,\n title = \t {Latent Intention Dialogue Models},\n author = {Tsung-Hsien Wen and Yishu Miao and Phil Blunsom and Steve Young},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3732--3741},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/wen17a/wen17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/wen17a.html},\n abstract = \t {Developing a dialogue agent that is capable of making autonomous decisions and communicating by natural language is one of the long-term goals of machine learning research. The traditional approaches either rely on hand-crafting a small state-action set for applying reinforcement learning that is not scalable or constructing deterministic models for learning dialogue sentences that fail to capture the conversational stochasticity. In this paper, however, we propose a Latent Intention Dialogue Model that employs a discrete latent variable to learn underlying dialogue intentions in the framework of neural variational inference. Additionally, in a goal-oriented dialogue scenario, the latent intentions can be interpreted as actions guiding the generation of machine responses, which can be further refined autonomously by reinforcement learning. 
The experiments demonstrate the effectiveness of discrete latent variable models on learning goal-oriented dialogues, and the results outperform the published benchmarks on both corpus-based evaluation and human evaluation.}\n}", "pdf": "http://proceedings.mlr.press/v70/wen17a/wen17a.pdf", "supp": "", "pdf_size": 501314, "gs_citation": 185, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16858014231401309317&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Engineering, University of Cambridge, Cambridge, United Kingdom+Department of Computer Science, University of Oxford, Oxford, United Kingdom; Department of Computer Science, University of Oxford, Oxford, United Kingdom; Department of Computer Science, University of Oxford, Oxford, United Kingdom; Department of Engineering, University of Cambridge, Cambridge, United Kingdom", "aff_domain": "cam.ac.uk;cs.ox.ac.uk; ; ", "email": "cam.ac.uk;cs.ox.ac.uk; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/wen17a.html", "aff_unique_index": "0+1;1;1;0", "aff_unique_norm": "University of Cambridge;University of Oxford", "aff_unique_dep": "Department of Engineering;Department of Computer Science", "aff_unique_url": "https://www.cam.ac.uk;https://www.ox.ac.uk", "aff_unique_abbr": "Cambridge;Oxford", "aff_campus_unique_index": "0+1;1;1;0", "aff_campus_unique": "Cambridge;Oxford", "aff_country_unique_index": "0+0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Latent LSTM Allocation: Joint Clustering and Non-Linear Dynamic Modeling of Sequence Data", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/817", "id": "817", "author_site": "Manzil Zaheer, Amr Ahmed, Alex Smola", "author": "Manzil Zaheer; Amr Ahmed; Alexander J. Smola", "abstract": "Recurrent neural networks, such as long-short term memory (LSTM) networks, are powerful tools for modeling sequential data like user browsing history (Tan et al., 2016; Korpusik et al., 2016) or natural language text (Mikolov et al., 2010). However, to generalize across different user types, LSTMs require a large number of parameters, notwithstanding the simplicity of the underlying dynamics, rendering it uninterpretable, which is highly undesirable in user modeling. The increase in complexity and parameters arises due to a large action space in which many of the actions have similar intent or topic. In this paper, we introduce Latent LSTM Allocation (LLA) for user modeling combining hierarchical Bayesian models with LSTMs. In LLA, each user is modeled as a sequence of actions, and the model jointly groups actions into topics and learns the temporal dynamics over the topic sequence, instead of action space directly. This leads to a model that is highly interpretable, concise, and can capture intricate dynamics. We present an efficient Stochastic EM inference algorithm for our model that scales to millions of users/documents. Our experimental evaluations show that the proposed model compares favorably with several state-of-the-art baselines.", "bibtex": "@InProceedings{pmlr-v70-zaheer17a,\n title = \t {Latent {LSTM} Allocation: Joint Clustering and Non-Linear Dynamic Modeling of Sequence Data},\n author = {Manzil Zaheer and Amr Ahmed and Alexander J. 
Smola},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3967--3976},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zaheer17a/zaheer17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/zaheer17a.html},\n abstract = \t {Recurrent neural networks, such as long-short term memory (LSTM) networks, are powerful tools for modeling sequential data like user browsing history (Tan et al., 2016; Korpusik et al., 2016) or natural language text (Mikolov et al., 2010). However, to generalize across different user types, LSTMs require a large number of parameters, notwithstanding the simplicity of the underlying dynamics, rendering it uninterpretable, which is highly undesirable in user modeling. The increase in complexity and parameters arises due to a large action space in which many of the actions have similar intent or topic. In this paper, we introduce Latent LSTM Allocation (LLA) for user modeling combining hierarchical Bayesian models with LSTMs. In LLA, each user is modeled as a sequence of actions, and the model jointly groups actions into topics and learns the temporal dynamics over the topic sequence, instead of action space directly. This leads to a model that is highly interpretable, concise, and can capture intricate dynamics. We present an efficient Stochastic EM inference algorithm for our model that scales to millions of users/documents. Our experimental evaluations show that the proposed model compares favorably with several state-of-the-art baselines.}\n}", "pdf": "http://proceedings.mlr.press/v70/zaheer17a/zaheer17a.pdf", "supp": "", "pdf_size": 688620, "gs_citation": 78, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5500830993621163745&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Carnegie Mellon University, Pittsburgh PA \u2013 work done while at Google; Google Inc, Mountain View CA; Carnegie Mellon University, Pittsburgh PA", "aff_domain": "cmu.edu; ; ", "email": "cmu.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/zaheer17a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Carnegie Mellon University;Google", "aff_unique_dep": ";Google", "aff_unique_url": "https://www.cmu.edu;https://www.google.com", "aff_unique_abbr": "CMU;Google", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Pittsburgh;Mountain View", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Lazifying Conditional Gradient Algorithms", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/509", "id": "509", "author_site": "G\u00e1bor Braun, Sebastian Pokutta, Daniel Zink", "author": "G\u00e1bor Braun; Sebastian Pokutta; Daniel Zink", "abstract": "Conditional gradient algorithms (also often called Frank-Wolfe algorithms) are popular due to their simplicity of only requiring a linear optimization oracle and more recently they also gained significant traction for online learning. While simple in principle, in many cases the actual implementation of the linear optimization oracle is costly. We show a general method to lazify various conditional gradient algorithms, which in actual computations leads to several orders of magnitude of speedup in wall-clock time. 
This is achieved by using a faster separation oracle instead of a linear optimization oracle, relying only on few linear optimization oracle calls.", "bibtex": "@InProceedings{pmlr-v70-braun17a,\n title = \t {Lazifying Conditional Gradient Algorithms},\n author = {G{\\'a}bor Braun and Sebastian Pokutta and Daniel Zink},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {566--575},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/braun17a/braun17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/braun17a.html},\n abstract = \t {Conditional gradient algorithms (also often called Frank-Wolfe algorithms) are popular due to their simplicity of only requiring a linear optimization oracle and more recently they also gained significant traction for online learning. While simple in principle, in many cases the actual implementation of the linear optimization oracle is costly. We show a general method to lazify various conditional gradient algorithms, which in actual computations leads to several orders of magnitude of speedup in wall-clock time. This is achieved by using a faster separation oracle instead of a linear optimization oracle, relying only on few linear optimization oracle calls.}\n}", "pdf": "http://proceedings.mlr.press/v70/braun17a/braun17a.pdf", "supp": "", "pdf_size": 1586857, "gs_citation": 61, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4174015310715259564&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "ISyE, Georgia Institute of Technology; ISyE, Georgia Institute of Technology; ISyE, Georgia Institute of Technology", "aff_domain": "gatech.edu; ; ", "email": "gatech.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/braun17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Georgia Institute of Technology", "aff_unique_dep": "Industrial and Systems Engineering", "aff_unique_url": "https://www.gatech.edu", "aff_unique_abbr": "Georgia Tech", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Atlanta", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Learned Optimizers that Scale and Generalize", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/780", "id": "780", "author_site": "Olga Wichrowska, Niru Maheswaranathan, Matthew Hoffman, Sergio G\u00f3mez Colmenarejo, Misha Denil, Nando de Freitas, Jascha Sohl-Dickstein", "author": "Olga Wichrowska; Niru Maheswaranathan; Matthew W. Hoffman; Sergio G\u00f3mez Colmenarejo; Misha Denil; Nando Freitas; Jascha Sohl-Dickstein", "abstract": "Learning to learn has emerged as an important direction for achieving artificial intelligence. Two of the primary barriers to its adoption are an inability to scale to larger problems and a limited ability to generalize to new tasks. We introduce a learned gradient descent optimizer that generalizes well to new tasks, and which has significantly reduced memory and computation overhead. We achieve this by introducing a novel hierarchical RNN architecture, with minimal per-parameter overhead, augmented with additional architectural features that mirror the known structure of optimization tasks. 
We also develop a meta-training ensemble of small, diverse, optimization tasks capturing common properties of loss landscapes. The optimizer learns to outperform RMSProp/ADAM on problems in this corpus. More importantly, it performs comparably or better when applied to small convolutional neural networks, despite seeing no neural networks in its meta-training set. Finally, it generalizes to train Inception V3 and ResNet V2 architectures on the ImageNet dataset for thousands of steps, optimization problems that are of a vastly different scale than those it was trained on.", "bibtex": "@InProceedings{pmlr-v70-wichrowska17a,\n title = \t {Learned Optimizers that Scale and Generalize},\n author = {Olga Wichrowska and Niru Maheswaranathan and Matthew W. Hoffman and Sergio G{\\'o}mez Colmenarejo and Misha Denil and Nando de Freitas and Jascha Sohl-Dickstein},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3751--3760},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/wichrowska17a/wichrowska17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/wichrowska17a.html},\n abstract = \t {Learning to learn has emerged as an important direction for achieving artificial intelligence. Two of the primary barriers to its adoption are an inability to scale to larger problems and a limited ability to generalize to new tasks. We introduce a learned gradient descent optimizer that generalizes well to new tasks, and which has significantly reduced memory and computation overhead. We achieve this by introducing a novel hierarchical RNN architecture, with minimal per-parameter overhead, augmented with additional architectural features that mirror the known structure of optimization tasks. We also develop a meta-training ensemble of small, diverse, optimization tasks capturing common properties of loss landscapes. The optimizer learns to outperform RMSProp/ADAM on problems in this corpus. More importantly, it performs comparably or better when applied to small convolutional neural networks, despite seeing no neural networks in its meta-training set. 
Finally, it generalizes to train Inception V3 and ResNet V2 architectures on the ImageNet dataset for thousands of steps, optimization problems that are of a vastly different scale than those it was trained on.}\n}", "pdf": "http://proceedings.mlr.press/v70/wichrowska17a/wichrowska17a.pdf", "supp": "", "pdf_size": 2573261, "gs_citation": 349, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15967731055470928461&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Google Brain; Google Brain+Stanford University; Stanford University; Deepmind; Deepmind; Deepmind; Google Brain", "aff_domain": "google.com; ; ; ; ; ; ", "email": "google.com; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v70/wichrowska17a.html", "aff_unique_index": "0;0+1;1;2;2;2;0", "aff_unique_norm": "Google;Stanford University;DeepMind", "aff_unique_dep": "Google Brain;;", "aff_unique_url": "https://brain.google.com;https://www.stanford.edu;https://deepmind.com", "aff_unique_abbr": "Google Brain;Stanford;DeepMind", "aff_campus_unique_index": "0;0+1;1;0", "aff_campus_unique": "Mountain View;Stanford;", "aff_country_unique_index": "0;0+0;0;1;1;1;0", "aff_country_unique": "United States;United Kingdom" }, { "title": "Learning Algorithms for Active Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/880", "id": "880", "author_site": "Philip Bachman, Alessandro Sordoni, Adam Trischler", "author": "Philip Bachman; Alessandro Sordoni; Adam Trischler", "abstract": "We introduce a model that learns active learning algorithms via metalearning. For a distribution of related tasks, our model jointly learns: a data representation, an item selection heuristic, and a prediction function. Our model uses the item selection heuristic to construct a labeled support set for training the prediction function. Using the Omniglot and MovieLens datasets, we test our model in synthetic and practical settings.", "bibtex": "@InProceedings{pmlr-v70-bachman17a,\n title = \t {Learning Algorithms for Active Learning},\n author = {Philip Bachman and Alessandro Sordoni and Adam Trischler},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {301--310},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/bachman17a/bachman17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/bachman17a.html},\n abstract = \t {We introduce a model that learns active learning algorithms via metalearning. For a distribution of related tasks, our model jointly learns: a data representation, an item selection heuristic, and a prediction function. Our model uses the item selection heuristic to construct a labeled support set for training the prediction function. 
Using the Omniglot and MovieLens datasets, we test our model in synthetic and practical settings.}\n}", "pdf": "http://proceedings.mlr.press/v70/bachman17a/bachman17a.pdf", "supp": "", "pdf_size": 457035, "gs_citation": 215, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15969984886065910507&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Microsoft Maluuba, Montreal, Canada; Microsoft Maluuba, Montreal, Canada; Microsoft Maluuba, Montreal, Canada", "aff_domain": "microsoft.com;microsoft.com; ", "email": "microsoft.com;microsoft.com; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/bachman17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Microsoft", "aff_unique_dep": "Microsoft Maluuba", "aff_unique_url": "https://www.microsoft.com", "aff_unique_abbr": "Microsoft", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Montreal", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Canada" }, { "title": "Learning Continuous Semantic Representations of Symbolic Expressions", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/658", "id": "658", "author_site": "Miltiadis Allamanis, pankajan Chanthirasegaran, Pushmeet Kohli, Charles Sutton", "author": "Miltiadis Allamanis; Pankajan Chanthirasegaran; Pushmeet Kohli; Charles Sutton", "abstract": "Combining abstract, symbolic reasoning with continuous neural reasoning is a grand challenge of representation learning. As a step in this direction, we propose a new architecture, called neural equivalence network, for the problem of learning continuous semantic representations of algebraic and logical expressions. These networks are trained to represent semantic equivalence, even of expressions that are syntactically very different. The challenge is that semantic representations must be computed in a syntax-directed manner, because semantics is compositional, but at the same time, small changes in syntax can lead to very large changes in semantics, which can be difficult for continuous neural architectures. We perform an exhaustive evaluation on the task of checking equivalence on a highly diverse class of symbolic algebraic and boolean expression types, showing that our model significantly outperforms existing architectures.", "bibtex": "@InProceedings{pmlr-v70-allamanis17a,\n title = \t {Learning Continuous Semantic Representations of Symbolic Expressions},\n author = {Miltiadis Allamanis and Pankajan Chanthirasegaran and Pushmeet Kohli and Charles Sutton},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {80--88},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/allamanis17a/allamanis17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/allamanis17a.html},\n abstract = \t {Combining abstract, symbolic reasoning with continuous neural reasoning is a grand challenge of representation learning. As a step in this direction, we propose a new architecture, called neural equivalence network, for the problem of learning continuous semantic representations of algebraic and logical expressions. These networks are trained to represent semantic equivalence, even of expressions that are syntactically very different. 
The challenge is that semantic representations must be computed in a syntax-directed manner, because semantics is compositional, but at the same time, small changes in syntax can lead to very large changes in semantics, which can be difficult for continuous neural architectures. We perform an exhaustive evaluation on the task of checking equivalence on a highly diverse class of symbolic algebraic and boolean expression types, showing that our model significantly outperforms existing architectures.}\n}", "pdf": "http://proceedings.mlr.press/v70/allamanis17a/allamanis17a.pdf", "supp": "", "pdf_size": 1413243, "gs_citation": 128, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11184386302743667447&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Microsoft Research, Cambridge, UK+The Alan Turing Institute, London, UK; University of Edinburgh, UK+The Alan Turing Institute, London, UK; DeepMind, London, UK; University of Edinburgh, UK+The Alan Turing Institute, London, UK", "aff_domain": "microsoft.com; ; ; ", "email": "microsoft.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/allamanis17a.html", "aff_unique_index": "0+1;2+1;3;2+1", "aff_unique_norm": "Microsoft;Alan Turing Institute;University of Edinburgh;DeepMind", "aff_unique_dep": "Microsoft Research;;;", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://www.turing.ac.uk;https://www.ed.ac.uk;https://deepmind.com", "aff_unique_abbr": "MSR;ATI;Edinburgh;DeepMind", "aff_campus_unique_index": "0+1;1;1;1", "aff_campus_unique": "Cambridge;London;", "aff_country_unique_index": "0+0;0+0;0;0+0", "aff_country_unique": "United Kingdom" }, { "title": "Learning Deep Architectures via Generalized Whitened Neural Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/488", "id": "488", "author": "Ping Luo", "abstract": "Whitened Neural Network (WNN) is a recent advanced deep architecture, which improves convergence and generalization of canonical neural networks by whitening their internal hidden representation. However, the whitening transformation increases computation time. Unlike WNN that reduced runtime by performing whitening every thousand iterations, which degenerates convergence due to the ill conditioning, we present generalized WNN (GWNN), which has three appealing properties. First, GWNN is able to learn compact representation to reduce computations. Second, it enables whitening transformation to be performed in a short period, preserving good conditioning. Third, we propose a data-independent estimation of the covariance matrix to further improve computational efficiency. 
Extensive experiments on various datasets demonstrate the benefits of GWNN.", "bibtex": "@InProceedings{pmlr-v70-luo17a,\n title = \t {Learning Deep Architectures via Generalized Whitened Neural Networks},\n author = {Ping Luo},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2238--2246},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/luo17a/luo17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/luo17a.html},\n abstract = \t {Whitened Neural Network (WNN) is a recent advanced deep architecture, which improves convergence and generalization of canonical neural networks by whitening their internal hidden representation. However, the whitening transformation increases computation time. Unlike WNN that reduced runtime by performing whitening every thousand iterations, which degenerates convergence due to the ill conditioning, we present generalized WNN (GWNN), which has three appealing properties. First, GWNN is able to learn compact representation to reduce computations. Second, it enables whitening transformation to be performed in a short period, preserving good conditioning. Third, we propose a data-independent estimation of the covariance matrix to further improve computational efficiency. Extensive experiments on various datasets demonstrate the benefits of GWNN.}\n}", "pdf": "http://proceedings.mlr.press/v70/luo17a/luo17a.pdf", "supp": "", "pdf_size": 1369288, "gs_citation": 49, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=24653560022359252&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Guangdong Provincial Key Laboratory of Computer Vision and Virtual Reality Technology, Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, Shenzhen, China+Multimedia Laboratory, The Chinese University of Hong Kong, Hong Kong", "aff_domain": "ie.cuhk.edu.hk", "email": "ie.cuhk.edu.hk", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v70/luo17a.html", "aff_unique_index": "0+1", "aff_unique_norm": "Chinese Academy of Sciences;Chinese University of Hong Kong", "aff_unique_dep": "Guangdong Provincial Key Laboratory of Computer Vision and Virtual Reality Technology;Multimedia Laboratory", "aff_unique_url": "http://www.cas.cn;https://www.cuhk.edu.hk", "aff_unique_abbr": "CAS;CUHK", "aff_campus_unique_index": "0+1", "aff_campus_unique": "Shenzhen;Hong Kong SAR", "aff_country_unique_index": "0+0", "aff_country_unique": "China" }, { "title": "Learning Deep Latent Gaussian Models with Markov Chain Monte Carlo", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/876", "id": "876", "author_site": "Matthew Hoffman", "author": "Matthew D. Hoffman", "abstract": "Deep latent Gaussian models are powerful and popular probabilistic models of high-dimensional data. These models are almost always fit using variational expectation-maximization, an approximation to true maximum-marginal-likelihood estimation. In this paper, we propose a different approach: rather than use a variational approximation (which produces biased gradient signals), we use Markov chain Monte Carlo (MCMC, which allows us to trade bias for computation). 
We find that our MCMC-based approach has several advantages: it yields higher held-out likelihoods, produces sharper images, and does not suffer from the variational overpruning effect. MCMC\u2019s additional computational overhead proves to be significant, but not prohibitive.", "bibtex": "@InProceedings{pmlr-v70-hoffman17a,\n title = \t {Learning Deep Latent {G}aussian Models with {M}arkov Chain {M}onte {C}arlo},\n author = {Matthew D. Hoffman},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1510--1519},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/hoffman17a/hoffman17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/hoffman17a.html},\n abstract = \t {Deep latent Gaussian models are powerful and popular probabilistic models of high-dimensional data. These models are almost always fit using variational expectation-maximization, an approximation to true maximum-marginal-likelihood estimation. In this paper, we propose a different approach: rather than use a variational approximation (which produces biased gradient signals), we use Markov chain Monte Carlo (MCMC, which allows us to trade bias for computation). We find that our MCMC-based approach has several advantages: it yields higher held-out likelihoods, produces sharper images, and does not suffer from the variational overpruning effect. MCMC\u2019s additional computational overhead proves to be significant, but not prohibitive.}\n}", "pdf": "http://proceedings.mlr.press/v70/hoffman17a/hoffman17a.pdf", "supp": "", "pdf_size": 709778, "gs_citation": 132, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6547506459364871598&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Google, San Francisco, California, USA", "aff_domain": "google.com", "email": "google.com", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v70/hoffman17a.html", "aff_unique_index": "0", "aff_unique_norm": "Google", "aff_unique_dep": "Google", "aff_unique_url": "https://www.google.com", "aff_unique_abbr": "Google", "aff_campus_unique_index": "0", "aff_campus_unique": "San Francisco", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "title": "Learning Determinantal Point Processes with Moments and Cycles", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/721", "id": "721", "author_site": "John C Urschel, Ankur Moitra, Philippe Rigollet, Victor-Emmanuel Brunel", "author": "John Urschel; Victor-Emmanuel Brunel; Ankur Moitra; Philippe Rigollet", "abstract": "Determinantal Point Processes (DPPs) are a family of probabilistic models that have a repulsive behavior, and lend themselves naturally to many tasks in machine learning where returning a diverse set of objects is important. While there are fast algorithms for sampling, marginalization and conditioning, much less is known about learning the parameters of a DPP. Our contribution is twofold: (i) we establish the optimal sample complexity achievable in this problem and show that it is governed by a natural parameter, which we call the cycle sparsity; (ii) we propose a provably fast combinatorial algorithm that implements the method of moments efficiently and achieves optimal sample complexity. 
Finally, we give experimental results that confirm our theoretical findings.", "bibtex": "@InProceedings{pmlr-v70-urschel17a,\n title = \t {Learning Determinantal Point Processes with Moments and Cycles},\n author = {John Urschel and Victor-Emmanuel Brunel and Ankur Moitra and Philippe Rigollet},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3511--3520},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/urschel17a/urschel17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/urschel17a.html},\n abstract = \t {Determinantal Point Processes (DPPs) are a family of probabilistic models that have a repulsive behavior, and lend themselves naturally to many tasks in machine learning where returning a diverse set of objects is important. While there are fast algorithms for sampling, marginalization and conditioning, much less is known about learning the parameters of a DPP. Our contribution is twofold: (i) we establish the optimal sample complexity achievable in this problem and show that it is governed by a natural parameter, which we call the cycle sparsity; (ii) we propose a provably fast combinatorial algorithm that implements the method of moments efficiently and achieves optimal sample complexity. Finally, we give experimental results that confirm our theoretical findings.}\n}", "pdf": "http://proceedings.mlr.press/v70/urschel17a/urschel17a.pdf", "supp": "", "pdf_size": 312005, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9389510146848423444&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Mathematics, MIT, USA; Department of Mathematics, MIT, USA; Department of Mathematics, MIT, USA; Department of Mathematics, MIT, USA", "aff_domain": "mit.edu; ; ; ", "email": "mit.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/urschel17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Department of Mathematics", "aff_unique_url": "https://web.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Learning Discrete Representations via Information Maximizing Self-Augmented Training", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/574", "id": "574", "author_site": "Weihua Hu, Takeru Miyato, Seiya Tokui, Eiichi Matsumoto, Masashi Sugiyama", "author": "Weihua Hu; Takeru Miyato; Seiya Tokui; Eiichi Matsumoto; Masashi Sugiyama", "abstract": "Learning discrete representations of data is a central machine learning task because of the compactness of the representations and ease of interpretation. The task includes clustering and hash learning as special cases. Deep neural networks are promising to be used because they can model the non-linearity of data and scale to large datasets. However, their model complexity is huge, and therefore, we need to carefully regularize the networks in order to learn useful representations that exhibit intended invariance for applications of interest. To this end, we propose a method called Information Maximizing Self-Augmented Training (IMSAT). 
In IMSAT, we use data augmentation to impose the invariance on discrete representations. More specifically, we encourage the predicted representations of augmented data points to be close to those of the original data points in an end-to-end fashion. At the same time, we maximize the information-theoretic dependency between data and their predicted discrete representations. Extensive experiments on benchmark datasets show that IMSAT produces state-of-the-art results for both clustering and unsupervised hash learning.", "bibtex": "@InProceedings{pmlr-v70-hu17b,\n title = \t {Learning Discrete Representations via Information Maximizing Self-Augmented Training},\n author = {Weihua Hu and Takeru Miyato and Seiya Tokui and Eiichi Matsumoto and Masashi Sugiyama},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1558--1567},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/hu17b/hu17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/hu17b.html},\n abstract = \t {Learning discrete representations of data is a central machine learning task because of the compactness of the representations and ease of interpretation. The task includes clustering and hash learning as special cases. Deep neural networks are promising to be used because they can model the non-linearity of data and scale to large datasets. However, their model complexity is huge, and therefore, we need to carefully regularize the networks in order to learn useful representations that exhibit intended invariance for applications of interest. To this end, we propose a method called Information Maximizing Self-Augmented Training (IMSAT). In IMSAT, we use data augmentation to impose the invariance on discrete representations. More specifically, we encourage the predicted representations of augmented data points to be close to those of the original data points in an end-to-end fashion. At the same time, we maximize the information-theoretic dependency between data and their predicted discrete representations. Extensive experiments on benchmark datasets show that IMSAT produces state-of-the-art results for both clustering and unsupervised hash learning.}\n}", "pdf": "http://proceedings.mlr.press/v70/hu17b/hu17b.pdf", "supp": "", "pdf_size": 486000, "gs_citation": 597, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6861637693851759310&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": ";;;;", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/hu17b.html" }, { "title": "Learning Gradient Descent: Better Generalization and Longer Horizons", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/875", "id": "875", "author_site": "Kaifeng Lyu, Shunhua Jiang, Jian Li", "author": "Kaifeng Lv; Shunhua Jiang; Jian Li", "abstract": "Training deep neural networks is a highly nontrivial task, involving carefully selecting appropriate training algorithms, scheduling step sizes and tuning other hyperparameters. Trying different combinations can be quite labor-intensive and time consuming. 
Recently, researchers have tried to use deep learning algorithms to exploit the landscape of the loss function of the training problem of interest, and learn how to optimize over it in an automatic way. In this paper, we propose a new learning-to-learn model and some useful and practical tricks. Our optimizer outperforms generic, hand-crafted optimization algorithms and state-of-the-art learning-to-learn optimizers by DeepMind in many tasks. We demonstrate the effectiveness of our algorithms on a number of tasks, including deep MLPs, CNNs, and simple LSTMs.", "bibtex": "@InProceedings{pmlr-v70-lv17a,\n title = \t {Learning Gradient Descent: Better Generalization and Longer Horizons},\n author = {Kaifeng Lv and Shunhua Jiang and Jian Li},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2247--2255},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/lv17a/lv17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/lv17a.html},\n abstract = \t {Training deep neural networks is a highly nontrivial task, involving carefully selecting appropriate training algorithms, scheduling step sizes and tuning other hyperparameters. Trying different combinations can be quite labor-intensive and time consuming. Recently, researchers have tried to use deep learning algorithms to exploit the landscape of the loss function of the training problem of interest, and learn how to optimize over it in an automatic way. In this paper, we propose a new learning-to-learn model and some useful and practical tricks. Our optimizer outperforms generic, hand-crafted optimization algorithms and state-of-the-art learning-to-learn optimizers by DeepMind in many tasks. 
We demonstrate the effectiveness of our algorithms on a number of tasks, including deep MLPs, CNNs, and simple LSTMs.}\n}", "pdf": "http://proceedings.mlr.press/v70/lv17a/lv17a.pdf", "supp": "", "pdf_size": 1813220, "gs_citation": 134, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9052123228970597817&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China; Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China; Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing, China", "aff_domain": "163.com;163.com;mail.tsinghua.edu.cn", "email": "163.com;163.com;mail.tsinghua.edu.cn", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/lv17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Tsinghua University", "aff_unique_dep": "Institute for Interdisciplinary Information Sciences", "aff_unique_url": "https://www.tsinghua.edu.cn", "aff_unique_abbr": "Tsinghua", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Beijing", "aff_country_unique_index": "0;0;0", "aff_country_unique": "China" }, { "title": "Learning Hawkes Processes from Short Doubly-Censored Event Sequences", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/543", "id": "543", "author_site": "Hongteng Xu, Dixin Luo, Hongyuan Zha", "author": "Hongteng Xu; Dixin Luo; Hongyuan Zha", "abstract": "Many real-world applications require robust algorithms to learn point process models based on a type of incomplete data \u2014 the so-called short doubly-censored (SDC) event sequences. In this paper, we study this critical problem of quantitative asynchronous event sequence analysis under the framework of Hawkes processes by leveraging the general idea of data synthesis. In particular, given SDC event sequences observed in a variety of time intervals, we propose a sampling-stitching data synthesis method \u2014 sampling predecessor and successor for each SDC event sequence from potential candidates and stitching them together to synthesize long training sequences. The rationality and the feasibility of our method are discussed in terms of arguments based on likelihood. Experiments on both synthetic and real-world data demonstrate that the proposed data synthesis method improves learning results indeed for both time-invariant and time-varying Hawkes processes.", "bibtex": "@InProceedings{pmlr-v70-xu17b,\n title = \t {Learning {H}awkes Processes from Short Doubly-Censored Event Sequences},\n author = {Hongteng Xu and Dixin Luo and Hongyuan Zha},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3831--3840},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/xu17b/xu17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/xu17b.html},\n abstract = \t {Many real-world applications require robust algorithms to learn point process models based on a type of incomplete data \u2014 the so-called short doubly-censored (SDC) event sequences. In this paper, we study this critical problem of quantitative asynchronous event sequence analysis under the framework of Hawkes processes by leveraging the general idea of data synthesis. 
In particular, given SDC event sequences observed in a variety of time intervals, we propose a sampling-stitching data synthesis method \u2014 sampling predecessor and successor for each SDC event sequence from potential candidates and stitching them together to synthesize long training sequences. The rationality and the feasibility of our method are discussed in terms of arguments based on likelihood. Experiments on both synthetic and real-world data demonstrate that the proposed data synthesis method improves learning results indeed for both time-invariant and time-varying Hawkes processes.}\n}", "pdf": "http://proceedings.mlr.press/v70/xu17b/xu17b.pdf", "supp": "", "pdf_size": 5179044, "gs_citation": 81, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2904144413797959127&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Georgia Institute of Technology, Atlanta, Georgia, USA; University of Toronto, Toronto, Ontario, Canada; Georgia Institute of Technology, Atlanta, Georgia, USA", "aff_domain": "gatech.edu;utoronto.ca; ", "email": "gatech.edu;utoronto.ca; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/xu17b.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Georgia Institute of Technology;University of Toronto", "aff_unique_dep": ";", "aff_unique_url": "https://www.gatech.edu;https://www.utoronto.ca", "aff_unique_abbr": "Georgia Tech;U of T", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Atlanta;Toronto", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United States;Canada" }, { "title": "Learning Hierarchical Features from Deep Generative Models", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/852", "id": "852", "author_site": "Shengjia Zhao, Jiaming Song, Stefano Ermon", "author": "Shengjia Zhao; Jiaming Song; Stefano Ermon", "abstract": "Deep neural networks have been shown to be very successful at learning feature hierarchies in supervised learning tasks. Generative models, on the other hand, have benefited less from hierarchical models with multiple layers of latent variables. In this paper, we prove that hierarchical latent variable models do not take advantage of the hierarchical structure when trained with existing variational methods, and provide some limitations on the kind of features existing models can learn. Finally we propose an alternative architecture that do not suffer from these limitations. Our model is able to learn highly interpretable and disentangled hierarchical features on several natural image datasets with no task specific regularization or prior knowledge.", "bibtex": "@InProceedings{pmlr-v70-zhao17c,\n title = \t {Learning Hierarchical Features from Deep Generative Models},\n author = {Shengjia Zhao and Jiaming Song and Stefano Ermon},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {4091--4099},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zhao17c/zhao17c.pdf},\n url = \t {https://proceedings.mlr.press/v70/zhao17c.html},\n abstract = \t {Deep neural networks have been shown to be very successful at learning feature hierarchies in supervised learning tasks. Generative models, on the other hand, have benefited less from hierarchical models with multiple layers of latent variables. 
In this paper, we prove that hierarchical latent variable models do not take advantage of the hierarchical structure when trained with existing variational methods, and provide some limitations on the kind of features existing models can learn. Finally we propose an alternative architecture that do not suffer from these limitations. Our model is able to learn highly interpretable and disentangled hierarchical features on several natural image datasets with no task specific regularization or prior knowledge.}\n}", "pdf": "http://proceedings.mlr.press/v70/zhao17c/zhao17c.pdf", "supp": "", "pdf_size": 1289921, "gs_citation": 115, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2986161404045113227&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Stanford University; Stanford University; Stanford University", "aff_domain": "stanford.edu;stanford.edu; ", "email": "stanford.edu;stanford.edu; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/zhao17c.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Learning Important Features Through Propagating Activation Differences", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/614", "id": "614", "author_site": "Avanti Shrikumar, Peyton Greenside, Anshul Kundaje", "author": "Avanti Shrikumar; Peyton Greenside; Anshul Kundaje", "abstract": "The purported \u201cblack box\u201d nature of neural networks is a barrier to adoption in applications where interpretability is essential. Here we present DeepLIFT (Deep Learning Important FeaTures), a method for decomposing the output prediction of a neural network on a specific input by backpropagating the contributions of all neurons in the network to every feature of the input. DeepLIFT compares the activation of each neuron to its `reference activation\u2019 and assigns contribution scores according to the difference. By optionally giving separate consideration to positive and negative contributions, DeepLIFT can also reveal dependencies which are missed by other approaches. Scores can be computed efficiently in a single backward pass. We apply DeepLIFT to models trained on MNIST and simulated genomic data, and show significant advantages over gradient-based methods. Video tutorial: http://goo.gl/qKb7pL code: http://goo.gl/RM8jvH", "bibtex": "@InProceedings{pmlr-v70-shrikumar17a,\n title = \t {Learning Important Features Through Propagating Activation Differences},\n author = {Avanti Shrikumar and Peyton Greenside and Anshul Kundaje},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3145--3153},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/shrikumar17a/shrikumar17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/shrikumar17a.html},\n abstract = \t {The purported \u201cblack box\u201d nature of neural networks is a barrier to adoption in applications where interpretability is essential. 
Here we present DeepLIFT (Deep Learning Important FeaTures), a method for decomposing the output prediction of a neural network on a specific input by backpropagating the contributions of all neurons in the network to every feature of the input. DeepLIFT compares the activation of each neuron to its `reference activation\u2019 and assigns contribution scores according to the difference. By optionally giving separate consideration to positive and negative contributions, DeepLIFT can also reveal dependencies which are missed by other approaches. Scores can be computed efficiently in a single backward pass. We apply DeepLIFT to models trained on MNIST and simulated genomic data, and show significant advantages over gradient-based methods. Video tutorial: http://goo.gl/qKb7pL code: http://goo.gl/RM8jvH}\n}", "pdf": "http://proceedings.mlr.press/v70/shrikumar17a/shrikumar17a.pdf", "supp": "", "pdf_size": 898035, "gs_citation": 5417, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3870608604214378324&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Stanford University; Stanford University; Stanford University", "aff_domain": "stanford.edu; ; ", "email": "stanford.edu; ; ", "github": "", "project": "http://goo.gl/RM8jvH", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/shrikumar17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Learning Infinite Layer Networks Without the Kernel Trick", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/755", "id": "755", "author_site": "Roi Livni, Daniel Carmon, Amir Globerson", "author": "Roi Livni; Daniel Carmon; Amir Globerson", "abstract": "Infinite Layer Networks (ILN) have been proposed as an architecture that mimics neural networks while enjoying some of the advantages of kernel methods. ILN are networks that integrate over infinitely many nodes within a single hidden layer. It has been demonstrated by several authors that the problem of learning ILN can be reduced to the kernel trick, implying that whenever a certain integral can be computed analytically they are efficiently learnable. In this work we give an online algorithm for ILN, which avoids the kernel trick assumption. More generally and of independent interest, we show that kernel methods in general can be exploited even when the kernel cannot be efficiently computed but can only be estimated via sampling. We provide a regret analysis for our algorithm, showing that it matches the sample complexity of methods which have access to kernel values. 
Thus, our method is the first to demonstrate that the kernel trick is not necessary, as such, and random features suffice to obtain comparable performance.", "bibtex": "@InProceedings{pmlr-v70-livni17a,\n title = \t {Learning Infinite Layer Networks Without the Kernel Trick},\n author = {Roi Livni and Daniel Carmon and Amir Globerson},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2198--2207},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/livni17a/livni17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/livni17a.html},\n abstract = \t {Infinite Layer Networks (ILN) have been proposed as an architecture that mimics neural networks while enjoying some of the advantages of kernel methods. ILN are networks that integrate over infinitely many nodes within a single hidden layer. It has been demonstrated by several authors that the problem of learning ILN can be reduced to the kernel trick, implying that whenever a certain integral can be computed analytically they are efficiently learnable. In this work we give an online algorithm for ILN, which avoids the kernel trick assumption. More generally and of independent interest, we show that kernel methods in general can be exploited even when the kernel cannot be efficiently computed but can only be estimated via sampling. We provide a regret analysis for our algorithm, showing that it matches the sample complexity of methods which have access to kernel values. Thus, our method is the first to demonstrate that the kernel trick is not necessary, as such, and random features suffice to obtain comparable performance.}\n}", "pdf": "http://proceedings.mlr.press/v70/livni17a/livni17a.pdf", "supp": "", "pdf_size": 381655, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4645275609714391065&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "University of Princeton, Princeton, New Jersey, USA+Tel-Aviv University, Tel-Aviv, Israel; Tel-Aviv University, Tel-Aviv, Israel; Tel-Aviv University, Tel-Aviv, Israel", "aff_domain": "cs.princeton.edu;mail.tau.ac.il;mail.tau.ac.il", "email": "cs.princeton.edu;mail.tau.ac.il;mail.tau.ac.il", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/livni17a.html", "aff_unique_index": "0+1;1;1", "aff_unique_norm": "Princeton University;Tel Aviv University", "aff_unique_dep": ";", "aff_unique_url": "https://www.princeton.edu;https://www.tau.ac.il", "aff_unique_abbr": "Princeton;TAU", "aff_campus_unique_index": "0+1;1;1", "aff_campus_unique": "Princeton;Tel-Aviv", "aff_country_unique_index": "0+1;1;1", "aff_country_unique": "United States;Israel" }, { "title": "Learning Latent Space Models with Angular Constraints", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/499", "id": "499", "author_site": "Pengtao Xie, Yuntian Deng, Yi Zhou, Abhimanu Kumar, Yaoliang Yu, James Zou, Eric Xing", "author": "Pengtao Xie; Yuntian Deng; Yi Zhou; Abhimanu Kumar; Yaoliang Yu; James Zou; Eric P. Xing", "abstract": "The large model capacity of latent space models (LSMs) enables them to achieve great performance on various applications, but meanwhile renders LSMs to be prone to overfitting. 
Several recent studies investigate a new type of regularization approach, which encourages components in LSMs to be diverse, for the sake of alleviating overfitting. While they have shown promising empirical effectiveness, in theory why larger \u201cdiversity\u201d results in less overfitting is still unclear. To bridge this gap, we propose a new diversity-promoting approach that is both theoretically analyzable and empirically effective. Specifically, we use near-orthogonality to characterize \u201cdiversity\u201d and impose angular constraints (ACs) on the components of LSMs to promote diversity. A generalization error analysis shows that larger diversity results in smaller estimation error and larger approximation error. An efficient ADMM algorithm is developed to solve the constrained LSM problems. Experiments demonstrate that ACs improve generalization performance of LSMs and outperform other diversity-promoting approaches.", "bibtex": "@InProceedings{pmlr-v70-xie17a,\n title = \t {Learning Latent Space Models with Angular Constraints},\n author = {Pengtao Xie and Yuntian Deng and Yi Zhou and Abhimanu Kumar and Yaoliang Yu and James Zou and Eric P. Xing},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3799--3810},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/xie17a/xie17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/xie17a.html},\n abstract = \t {The large model capacity of latent space models (LSMs) enables them to achieve great performance on various applications, but meanwhile renders LSMs to be prone to overfitting. Several recent studies investigate a new type of regularization approach, which encourages components in LSMs to be diverse, for the sake of alleviating overfitting. While they have shown promising empirical effectiveness, in theory why larger \u201cdiversity\u201d results in less overfitting is still unclear. To bridge this gap, we propose a new diversity-promoting approach that is both theoretically analyzable and empirically effective. Specifically, we use near-orthogonality to characterize \u201cdiversity\u201d and impose angular constraints (ACs) on the components of LSMs to promote diversity. A generalization error analysis shows that larger diversity results in smaller estimation error and larger approximation error. An efficient ADMM algorithm is developed to solve the constrained LSM problems. 
Experiments demonstrate that ACs improve generalization performance of LSMs and outperform other diversity-promoting approaches.}\n}", "pdf": "http://proceedings.mlr.press/v70/xie17a/xie17a.pdf", "supp": "", "pdf_size": 326310, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11308032297521842103&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Machine Learning Department, Carnegie Mellon University + Petuum Inc.; School of Engineering and Applied Sciences, Harvard University; College of Engineering and Computer Science, Syracuse University; Groupon Inc.; School of Computer Science, University of Waterloo; Department of Biomedical Data Science, Stanford University; Machine Learning Department, Carnegie Mellon University + Petuum Inc.", "aff_domain": "cs.cmu.edu; ; ; ; ; ;petuum.com", "email": "cs.cmu.edu; ; ; ; ; ;petuum.com", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v70/xie17a.html", "aff_unique_index": "0+1;2;3;4;5;6;0+1", "aff_unique_norm": "Carnegie Mellon University;Petuum Inc.;Harvard University;Syracuse University;Groupon;University of Waterloo;Stanford University", "aff_unique_dep": "Machine Learning Department;;School of Engineering and Applied Sciences;College of Engineering and Computer Science;;School of Computer Science;Department of Biomedical Data Science", "aff_unique_url": "https://www.cmu.edu;https://www.petuum.com;https://www.harvard.edu;https://www.syracuse.edu;https://www.groupon.com;https://uwaterloo.ca;https://www.stanford.edu", "aff_unique_abbr": "CMU;;Harvard;Syracuse;Groupon;UWaterloo;Stanford", "aff_campus_unique_index": ";1;2;3;", "aff_campus_unique": ";Cambridge;Waterloo;Stanford", "aff_country_unique_index": "0+0;0;0;0;1;0;0+0", "aff_country_unique": "United States;Canada" }, { "title": "Learning Sleep Stages from Radio Signals: A Conditional Adversarial Architecture", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/889", "id": "889", "author_site": "Mingmin Zhao, Shichao Yue, Dina Katabi, Tommi Jaakkola, Matt Bianchi", "author": "Mingmin Zhao; Shichao Yue; Dina Katabi; Tommi S. Jaakkola; Matt T. Bianchi", "abstract": "We focus on predicting sleep stages from radio measurements without any attached sensors on subjects. We introduce a new predictive model that combines convolutional and recurrent neural networks to extract sleep-specific subject-invariant features from RF signals and capture the temporal progression of sleep. A key innovation underlying our approach is a modified adversarial training regime that discards extraneous information specific to individuals or measurement conditions, while retaining all information relevant to the predictive task. We analyze our game theoretic setup and empirically demonstrate that our model achieves significant improvements over state-of-the-art solutions.", "bibtex": "@InProceedings{pmlr-v70-zhao17d,\n title = \t {Learning Sleep Stages from Radio Signals: A Conditional Adversarial Architecture},\n author = {Mingmin Zhao and Shichao Yue and Dina Katabi and Tommi S. Jaakkola and Matt T. 
Bianchi},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {4100--4109},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zhao17d/zhao17d.pdf},\n url = \t {https://proceedings.mlr.press/v70/zhao17d.html},\n abstract = \t {We focus on predicting sleep stages from radio measurements without any attached sensors on subjects. We introduce a new predictive model that combines convolutional and recurrent neural networks to extract sleep-specific subject-invariant features from RF signals and capture the temporal progression of sleep. A key innovation underlying our approach is a modified adversarial training regime that discards extraneous information specific to individuals or measurement conditions, while retaining all information relevant to the predictive task. We analyze our game theoretic setup and empirically demonstrate that our model achieves significant improvements over state-of-the-art solutions.}\n}", "pdf": "http://proceedings.mlr.press/v70/zhao17d/zhao17d.pdf", "supp": "", "pdf_size": 604365, "gs_citation": 325, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7029574354312284563&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "MIT CSAIL, Cambridge, MA, USA; MIT CSAIL, Cambridge, MA, USA; MIT CSAIL, Cambridge, MA, USA; MIT CSAIL, Cambridge, MA, USA; Massachusetts General Hospital, Boston, MA, USA", "aff_domain": "mit.edu; ; ; ; ", "email": "mit.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/zhao17d.html", "aff_unique_index": "0;0;0;0;1", "aff_unique_norm": "Massachusetts Institute of Technology;Massachusetts General Hospital", "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory;", "aff_unique_url": "https://www.csail.mit.edu;https://www.massgeneral.org", "aff_unique_abbr": "MIT CSAIL;MGH", "aff_campus_unique_index": "0;0;0;0;1", "aff_campus_unique": "Cambridge;Boston", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Learning Stable Stochastic Nonlinear Dynamical Systems", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/531", "id": "531", "author_site": "Jonas Umlauft, Sandra Hirche", "author": "Jonas Umlauft; Sandra Hirche", "abstract": "A data-driven identification of dynamical systems requiring only minimal prior knowledge is promising whenever no analytically derived model structure is available, e.g., from first principles in physics. However, meta-knowledge on the system\u2019s behavior is often given and should be exploited: Stability as fundamental property is essential when the model is used for controller design or movement generation. Therefore, this paper proposes a framework for learning stable stochastic systems from data. We focus on identifying a state-dependent coefficient form of the nonlinear stochastic model which is globally asymptotically stable according to probabilistic Lyapunov methods. 
We compare our approach to other state of the art methods on real-world datasets in terms of flexibility and stability.", "bibtex": "@InProceedings{pmlr-v70-umlauft17a,\n title = \t {Learning Stable Stochastic Nonlinear Dynamical Systems},\n author = {Jonas Umlauft and Sandra Hirche},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3502--3510},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/umlauft17a/umlauft17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/umlauft17a.html},\n abstract = \t {A data-driven identification of dynamical systems requiring only minimal prior knowledge is promising whenever no analytically derived model structure is available, e.g., from first principles in physics. However, meta-knowledge on the system\u2019s behavior is often given and should be exploited: Stability as fundamental property is essential when the model is used for controller design or movement generation. Therefore, this paper proposes a framework for learning stable stochastic systems from data. We focus on identifying a state-dependent coefficient form of the nonlinear stochastic model which is globally asymptotically stable according to probabilistic Lyapunov methods. We compare our approach to other state of the art methods on real-world datasets in terms of flexibility and stability.}\n}", "pdf": "http://proceedings.mlr.press/v70/umlauft17a/umlauft17a.pdf", "supp": "", "pdf_size": 576232, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11206490692042646671&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Chair of Information-oriented Control, Technical University of Munich, Munich, Germany; Chair of Information-oriented Control, Technical University of Munich, Munich, Germany", "aff_domain": "tum.de; ", "email": "tum.de; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/umlauft17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Technical University of Munich", "aff_unique_dep": "Chair of Information-oriented Control", "aff_unique_url": "https://www.tum.de", "aff_unique_abbr": "TUM", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Munich", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "title": "Learning Texture Manifolds with the Periodic Spatial GAN", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/664", "id": "664", "author_site": "Urs M Bergmann, Nikolay Jetchev, Roland Vollgraf", "author": "Urs Bergmann; Nikolay Jetchev; Roland Vollgraf", "abstract": "This paper introduces a novel approach to texture synthesis based on generative adversarial networks (GAN) (Goodfellow et al., 2014), and call this technique Periodic Spatial GAN (PSGAN). The PSGAN has several novel abilities which surpass the current state of the art in texture synthesis. First, we can learn multiple textures, periodic or non-periodic, from datasets of one or more complex large images. Second, we show that the image generation with PSGANs has properties of a texture manifold: we can smoothly interpolate between samples in the structured noise space and generate novel samples, which lie perceptually between the textures of the original dataset. 
We make multiple experiments which show that PSGANs can flexibly handle diverse texture and image data sources, and the method is highly scalable and can generate output images of arbitrary large size.", "bibtex": "@InProceedings{pmlr-v70-bergmann17a,\n title = \t {Learning Texture Manifolds with the Periodic Spatial {GAN}},\n author = {Urs Bergmann and Nikolay Jetchev and Roland Vollgraf},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {469--477},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/bergmann17a/bergmann17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/bergmann17a.html},\n abstract = \t {This paper introduces a novel approach to texture synthesis based on generative adversarial networks (GAN) (Goodfellow et al., 2014), and call this technique Periodic Spatial GAN (PSGAN). The PSGAN has several novel abilities which surpass the current state of the art in texture synthesis. First, we can learn multiple textures, periodic or non-periodic, from datasets of one or more complex large images. Second, we show that the image generation with PSGANs has properties of a texture manifold: we can smoothly interpolate between samples in the structured noise space and generate novel samples, which lie perceptually between the textures of the original dataset. We make multiple experiments which show that PSGANs can flexibly handle diverse texture and image data sources, and the method is highly scalable and can generate output images of arbitrary large size.}\n}", "pdf": "http://proceedings.mlr.press/v70/bergmann17a/bergmann17a.pdf", "supp": "", "pdf_size": 7301223, "gs_citation": 190, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1389278938259265903&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Zalando Research, Berlin; Zalando Research, Berlin; Zalando Research, Berlin", "aff_domain": "zalando.de;zalando.de;zalando.de", "email": "zalando.de;zalando.de;zalando.de", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/bergmann17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Zalando Research", "aff_unique_dep": "Research", "aff_unique_url": "https://research.zalando.com", "aff_unique_abbr": "Zalando Res.", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Berlin", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Germany" }, { "title": "Learning from Clinical Judgments: Semi-Markov-Modulated Marked Hawkes Processes for Risk Prognosis", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/573", "id": "573", "author_site": "Ahmed M. Alaa, Scott B Hu, Mihaela van der Schaar", "author": "Ahmed M. Alaa; Scott Hu; Mihaela Schaar", "abstract": "Critically ill patients in regular wards are vulnerable to unanticipated adverse events which require prompt transfer to the intensive care unit (ICU). To allow for accurate prognosis of deteriorating patients, we develop a novel continuous-time probabilistic model for a monitored patient\u2019s temporal sequence of physiological data. 
Our model captures \u201cinformatively sampled\u201d patient episodes: the clinicians\u2019 decisions on when to observe a hospitalized patient\u2019s vital signs and lab tests over time are represented by a marked Hawkes process, with intensity parameters that are modulated by the patient\u2019s latent clinical states, and with observable physiological data (mark process) modeled as a switching multi-task Gaussian process. In addition, our model captures \u201cinformatively censored\u201d patient episodes by representing the patient\u2019s latent clinical states as an absorbing semi-Markov jump process. The model parameters are learned from offline patient episodes in the electronic health records via an EM-based algorithm. Experiments conducted on a cohort of patients admitted to a major medical center over a 3-year period show that risk prognosis based on our model significantly outperforms the currently deployed medical risk scores and other baseline machine learning algorithms.", "bibtex": "@InProceedings{pmlr-v70-alaa17a,\n title = \t {Learning from Clinical Judgments: Semi-{M}arkov-Modulated Marked {H}awkes Processes for Risk Prognosis},\n author = {Ahmed M. Alaa and Scott Hu and Mihaela van der Schaar},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {60--69},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/alaa17a/alaa17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/alaa17a.html},\n abstract = \t {Critically ill patients in regular wards are vulnerable to unanticipated adverse events which require prompt transfer to the intensive care unit (ICU). To allow for accurate prognosis of deteriorating patients, we develop a novel continuous-time probabilistic model for a monitored patient\u2019s temporal sequence of physiological data. Our model captures \u201cinformatively sampled\u201d patient episodes: the clinicians\u2019 decisions on when to observe a hospitalized patient\u2019s vital signs and lab tests over time are represented by a marked Hawkes process, with intensity parameters that are modulated by the patient\u2019s latent clinical states, and with observable physiological data (mark process) modeled as a switching multi-task Gaussian process. In addition, our model captures \u201cinformatively censored\u201d patient episodes by representing the patient\u2019s latent clinical states as an absorbing semi-Markov jump process. The model parameters are learned from offline patient episodes in the electronic health records via an EM-based algorithm. 
Experiments conducted on a cohort of patients admitted to a major medical center over a 3-year period show that risk prognosis based on our model significantly outperforms the currently deployed medical risk scores and other baseline machine learning algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v70/alaa17a/alaa17a.pdf", "supp": "", "pdf_size": 182800, "gs_citation": 72, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14684700428664876985&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "University of California, Los Angeles, US+University of Oxford, UK+Alan Turing Institute, UK; University of California, Los Angeles, US; University of California, Los Angeles, US+University of Oxford, UK+Alan Turing Institute, UK", "aff_domain": "ucla.edu; ; ", "email": "ucla.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/alaa17a.html", "aff_unique_index": "0+1+2;0;0+1+2", "aff_unique_norm": "University of California, Los Angeles;University of Oxford;Alan Turing Institute", "aff_unique_dep": ";;", "aff_unique_url": "https://www.ucla.edu;https://www.ox.ac.uk;https://www.turing.ac.uk", "aff_unique_abbr": "UCLA;Oxford;ATI", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Los Angeles;", "aff_country_unique_index": "0+1+1;0;0+1+1", "aff_country_unique": "United States;United Kingdom" }, { "title": "Learning in POMDPs with Monte Carlo Tree Search", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/750", "id": "750", "author_site": "Sammie Katt, Frans A Oliehoek, Chris Amato", "author": "Sammie Katt; Frans A. Oliehoek; Christopher Amato", "abstract": "The POMDP is a powerful framework for reasoning under outcome and information uncertainty, but constructing an accurate POMDP model is difficult. Bayes-Adaptive Partially Observable Markov Decision Processes (BA-POMDPs) extend POMDPs to allow the model to be learned during execution. BA-POMDPs are a Bayesian RL approach that, in principle, allows for an optimal trade-off between exploitation and exploration. Unfortunately, BA-POMDPs are currently impractical to solve for any non-trivial domain. In this paper, we extend the Monte-Carlo Tree Search method POMCP to BA-POMDPs and show that the resulting method, which we call BA-POMCP, is able to tackle problems that previous solution methods have been unable to solve. Additionally, we introduce several techniques that exploit the BA-POMDP structure to improve the efficiency of BA-POMCP along with proof of their convergence.", "bibtex": "@InProceedings{pmlr-v70-katt17a,\n title = \t {Learning in {POMDP}s with {M}onte {C}arlo Tree Search},\n author = {Sammie Katt and Frans A. Oliehoek and Christopher Amato},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1819--1827},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/katt17a/katt17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/katt17a.html},\n abstract = \t {The POMDP is a powerful framework for reasoning under outcome and information uncertainty, but constructing an accurate POMDP model is difficult. Bayes-Adaptive Partially Observable Markov Decision Processes (BA-POMDPs) extend POMDPs to allow the model to be learned during execution. 
BA-POMDPs are a Bayesian RL approach that, in principle, allows for an optimal trade-off between exploitation and exploration. Unfortunately, BA-POMDPs are currently impractical to solve for any non-trivial domain. In this paper, we extend the Monte-Carlo Tree Search method POMCP to BA-POMDPs and show that the resulting method, which we call BA-POMCP, is able to tackle problems that previous solution methods have been unable to solve. Additionally, we introduce several techniques that exploit the BA-POMDP structure to improve the efficiency of BA-POMCP along with proof of their convergence.}\n}", "pdf": "http://proceedings.mlr.press/v70/katt17a/katt17a.pdf", "supp": "", "pdf_size": 489849, "gs_citation": 96, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3932396554534472784&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "Northeastern University, Boston, Massachusetts, USA; University of Liverpool, UK; Northeastern University, Boston, Massachusetts, USA", "aff_domain": "husky.neu.edu; ; ", "email": "husky.neu.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/katt17a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Northeastern University;University of Liverpool", "aff_unique_dep": ";", "aff_unique_url": "https://www.northeastern.edu;https://www.liverpool.ac.uk", "aff_unique_abbr": "NEU;Liv Uni", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Boston;", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United States;United Kingdom" }, { "title": "Learning the Structure of Generative Models without Labeled Data", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/464", "id": "464", "author_site": "Stephen Bach, Bryan He, Alexander J Ratner, Christopher Re", "author": "Stephen H. Bach; Bryan He; Alexander Ratner; Christopher R\u00e9", "abstract": "Curating labeled training data has become the primary bottleneck in machine learning. Recent frameworks address this bottleneck with generative models to synthesize labels at scale from weak supervision sources. The generative model\u2019s dependency structure directly affects the quality of the estimated labels, but selecting a structure automatically without any labeled data is a distinct challenge. We propose a structure estimation method that maximizes the l1-regularized marginal pseudolikelihood of the observed data. Our analysis shows that the amount of unlabeled data required to identify the true structure scales sublinearly in the number of possible dependencies for a broad class of models. Simulations show that our method is 100x faster than a maximum likelihood approach and selects 1/4 as many extraneous dependencies. We also show that our method provides an average of 1.5 F1 points of improvement over existing, user-developed information extraction applications on real-world data such as PubMed journal abstracts.", "bibtex": "@InProceedings{pmlr-v70-bach17a,\n title = \t {Learning the Structure of Generative Models without Labeled Data},\n author = {Stephen H. 
Bach and Bryan He and Alexander Ratner and Christopher R{\\'e}},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {273--282},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/bach17a/bach17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/bach17a.html},\n abstract = \t {Curating labeled training data has become the primary bottleneck in machine learning. Recent frameworks address this bottleneck with generative models to synthesize labels at scale from weak supervision sources. The generative model\u2019s dependency structure directly affects the quality of the estimated labels, but selecting a structure automatically without any labeled data is a distinct challenge. We propose a structure estimation method that maximizes the l1-regularized marginal pseudolikelihood of the observed data. Our analysis shows that the amount of unlabeled data required to identify the true structure scales sublinearly in the number of possible dependencies for a broad class of models. Simulations show that our method is 100x faster than a maximum likelihood approach and selects 1/4 as many extraneous dependencies. We also show that our method provides an average of 1.5 F1 points of improvement over existing, user-developed information extraction applications on real-world data such as PubMed journal abstracts.}\n}", "pdf": "http://proceedings.mlr.press/v70/bach17a/bach17a.pdf", "supp": "", "pdf_size": 687718, "gs_citation": 202, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10373132577784369690&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 14, "aff": "Stanford University; Stanford University; Stanford University; Stanford University", "aff_domain": "cs.stanford.edu; ; ; ", "email": "cs.stanford.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/bach17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Learning to Aggregate Ordinal Labels by Maximizing Separating Width", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/503", "id": "503", "author_site": "Guangyong Chen, Shengyu Zhang, Di Lin, Hui Huang, Pheng Ann Heng", "author": "Guangyong Chen; Shengyu Zhang; Di Lin; Hui Huang; Pheng Ann Heng", "abstract": "While crowdsourcing has been a cost and time efficient method to label massive samples, one critical issue is quality control, for which the key challenge is to infer the ground truth from noisy or even adversarial data by various users. A large class of crowdsourcing problems, such as those involving age, grade, level, or stage, have an ordinal structure in their labels. Based on a technique of sampling estimated label from the posterior distribution, we define a novel separating width among the labeled observations to characterize the quality of sampled labels, and develop an efficient algorithm to optimize it through solving multiple linear decision boundaries and adjusting prior distributions. 
Our algorithm is empirically evaluated on several real world datasets, and demonstrates its supremacy over state-of-the-art methods.", "bibtex": "@InProceedings{pmlr-v70-chen17i,\n title = \t {Learning to Aggregate Ordinal Labels by Maximizing Separating Width},\n author = {Guangyong Chen and Shengyu Zhang and Di Lin and Hui Huang and Pheng Ann Heng},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {787--796},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/chen17i/chen17i.pdf},\n url = \t {https://proceedings.mlr.press/v70/chen17i.html},\n abstract = \t {While crowdsourcing has been a cost and time efficient method to label massive samples, one critical issue is quality control, for which the key challenge is to infer the ground truth from noisy or even adversarial data by various users. A large class of crowdsourcing problems, such as those involving age, grade, level, or stage, have an ordinal structure in their labels. Based on a technique of sampling estimated label from the posterior distribution, we define a novel separating width among the labeled observations to characterize the quality of sampled labels, and develop an efficient algorithm to optimize it through solving multiple linear decision boundaries and adjusting prior distributions. Our algorithm is empirically evaluated on several real world datasets, and demonstrates its supremacy over state-of-the-art methods.}\n}", "pdf": "http://proceedings.mlr.press/v70/chen17i/chen17i.pdf", "supp": "", "pdf_size": 659751, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12970346933680175189&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "The Chinese University of Hong Kong, Hong Kong, China; The Chinese University of Hong Kong, Hong Kong, China; Shenzhen University, China; Shenzhen University, China; The Chinese University of Hong Kong, Hong Kong, China + Guangdong Provincial Key Laboratory of Computer Vision and Virtual Reality Technology, Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, Shenzhen, China", "aff_domain": "cse.cuhk.edu.hk; ; ; ; ", "email": "cse.cuhk.edu.hk; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/chen17i.html", "aff_unique_index": "0;0;1;1;0+2", "aff_unique_norm": "Chinese University of Hong Kong;Shenzhen University;Chinese Academy of Sciences", "aff_unique_dep": ";;Guangdong Provincial Key Laboratory of Computer Vision and Virtual Reality Technology", "aff_unique_url": "https://www.cuhk.edu.hk;https://www.szu.edu.cn;http://www.cas.cn", "aff_unique_abbr": "CUHK;SZU;CAS", "aff_campus_unique_index": "0;0;0+2", "aff_campus_unique": "Hong Kong;;Shenzhen", "aff_country_unique_index": "0;0;0;0;0+0", "aff_country_unique": "China" }, { "title": "Learning to Align the Source Code to the Compiled Object Code", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/821", "id": "821", "author_site": "Dor Levy, Lior Wolf", "author": "Dor Levy; Lior Wolf", "abstract": "We propose a new neural network architecture and use it for the task of statement-by-statement alignment of source code and its compiled object code. 
Our architecture learns the alignment between the two sequences \u2013 one being the translation of the other \u2013 by mapping each statement to a context-dependent representation vector and aligning such vectors using a grid of the two sequence domains. Our experiments include short C functions, both artificial and human-written, and show that our neural network architecture is able to predict the alignment with high accuracy, outperforming known baselines. We also demonstrate that our model is general and can learn to solve graph problems such as the Traveling Salesman Problem.", "bibtex": "@InProceedings{pmlr-v70-levy17a,\n title = \t {Learning to Align the Source Code to the Compiled Object Code},\n author = {Dor Levy and Lior Wolf},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2043--2051},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/levy17a/levy17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/levy17a.html},\n abstract = \t {We propose a new neural network architecture and use it for the task of statement-by-statement alignment of source code and its compiled object code. Our architecture learns the alignment between the two sequences \u2013 one being the translation of the other \u2013 by mapping each statement to a context-dependent representation vector and aligning such vectors using a grid of the two sequence domains. Our experiments include short C functions, both artificial and human-written, and show that our neural network architecture is able to predict the alignment with high accuracy, outperforming known baselines. We also demonstrate that our model is general and can learn to solve graph problems such as the Traveling Salesman Problem.}\n}", "pdf": "http://proceedings.mlr.press/v70/levy17a/levy17a.pdf", "supp": "", "pdf_size": 501541, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17370129900594621451&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "The School of Computer Science, Tel Aviv University; The School of Computer Science, Tel Aviv University + Facebook AI Research", "aff_domain": "cs.tau.ac.il;cs.tau.ac.il", "email": "cs.tau.ac.il;cs.tau.ac.il", "github": "https://github.com/DorLevyML/learn-align", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/levy17a.html", "aff_unique_index": "0;0+1", "aff_unique_norm": "Tel Aviv University;Meta", "aff_unique_dep": "School of Computer Science;Facebook AI Research", "aff_unique_url": "https://www.tau.ac.il;https://research.facebook.com", "aff_unique_abbr": "TAU;FAIR", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Tel Aviv;", "aff_country_unique_index": "0;0+1", "aff_country_unique": "Israel;United States" }, { "title": "Learning to Detect Sepsis with a Multitask Gaussian Process RNN Classifier", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/757", "id": "757", "author_site": "Joseph Futoma, Sanjay Hariharan, Katherine Heller", "author": "Joseph Futoma; Sanjay Hariharan; Katherine Heller", "abstract": "We present a scalable end-to-end classifier that uses streaming physiological and medication data to accurately predict the onset of sepsis, a life-threatening complication from infections that has high mortality and morbidity. 
Our proposed framework models the multivariate trajectories of continuous-valued physiological time series using multitask Gaussian processes, seamlessly accounting for the high uncertainty, frequent missingness, and irregular sampling rates typically associated with real clinical data. The Gaussian process is directly connected to a black-box classifier that predicts whether a patient will become septic, chosen in our case to be a recurrent neural network to account for the extreme variability in the length of patient encounters. We show how to scale the computations associated with the Gaussian process in a manner so that the entire system can be discriminatively trained end-to-end using backpropagation. In a large cohort of heterogeneous inpatient encounters at our university health system we find that it outperforms several baselines at predicting sepsis, and yields 19.4\\% and 55.5\\% improved areas under the Receiver Operating Characteristic and Precision Recall curves as compared to the NEWS score currently used by our hospital.", "bibtex": "@InProceedings{pmlr-v70-futoma17a,\n title = \t {Learning to Detect Sepsis with a Multitask {G}aussian Process {RNN} Classifier},\n author = {Joseph Futoma and Sanjay Hariharan and Katherine Heller},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1174--1182},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/futoma17a/futoma17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/futoma17a.html},\n abstract = \t {We present a scalable end-to-end classifier that uses streaming physiological and medication data to accurately predict the onset of sepsis, a life-threatening complication from infections that has high mortality and morbidity. Our proposed framework models the multivariate trajectories of continuous-valued physiological time series using multitask Gaussian processes, seamlessly accounting for the high uncertainty, frequent missingness, and irregular sampling rates typically associated with real clinical data. The Gaussian process is directly connected to a black-box classifier that predicts whether a patient will become septic, chosen in our case to be a recurrent neural network to account for the extreme variability in the length of patient encounters. We show how to scale the computations associated with the Gaussian process in a manner so that the entire system can be discriminatively trained end-to-end using backpropagation. In a large cohort of heterogeneous inpatient encounters at our university health system we find that it outperforms several baselines at predicting sepsis, and yields 19.4\\% and 55.5\\% improved areas under the Receiver Operating Characteristic and Precision Recall curves as compared to the NEWS score currently used by our hospital.}\n}", "pdf": "http://proceedings.mlr.press/v70/futoma17a/futoma17a.pdf", "supp": "", "pdf_size": 1269063, "gs_citation": 228, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7512878614206965379&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Dept. of Statistical Science, Duke University; Dept. of Statistical Science, Duke University; Dept. 
of Statistical Science, Duke University", "aff_domain": "duke.edu; ; ", "email": "duke.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/futoma17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Duke University", "aff_unique_dep": "Dept. of Statistical Science", "aff_unique_url": "https://www.duke.edu", "aff_unique_abbr": "Duke", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Learning to Discover Cross-Domain Relations with Generative Adversarial Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/496", "id": "496", "author_site": "Taeksoo Kim, Moonsu Cha, Hyunsoo Kim, Jungkwon Lee, Jiwon Kim", "author": "Taeksoo Kim; Moonsu Cha; Hyunsoo Kim; Jung Kwon Lee; Jiwon Kim", "abstract": "While humans easily recognize relations between data from different domains without any supervision, learning to automatically discover them is in general very challenging and needs many ground-truth pairs that illustrate the relations. To avoid costly pairing, we address the task of discovering cross-domain relations given unpaired data. We propose a method based on a generative adversarial network that learns to discover relations between different domains (DiscoGAN). Using the discovered relations, our proposed network successfully transfers style from one domain to another while preserving key attributes such as orientation and face identity.", "bibtex": "@InProceedings{pmlr-v70-kim17a,\n title = \t {Learning to Discover Cross-Domain Relations with Generative Adversarial Networks},\n author = {Taeksoo Kim and Moonsu Cha and Hyunsoo Kim and Jung Kwon Lee and Jiwon Kim},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1857--1865},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/kim17a/kim17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/kim17a.html},\n abstract = \t {While humans easily recognize relations between data from different domains without any supervision, learning to automatically discover them is in general very challenging and needs many ground-truth pairs that illustrate the relations. To avoid costly pairing, we address the task of discovering cross-domain relations given unpaired data. We propose a method based on a generative adversarial network that learns to discover relations between different domains (DiscoGAN). 
Using the discovered relations, our proposed network successfully transfers style from one domain to another while preserving key attributes such as orientation and face identity.}\n}", "pdf": "http://proceedings.mlr.press/v70/kim17a/kim17a.pdf", "supp": "", "pdf_size": 2939937, "gs_citation": 2810, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=463778412690777341&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "SK T-Brain, Seoul, South Korea; SK T-Brain, Seoul, South Korea; SK T-Brain, Seoul, South Korea; SK T-Brain, Seoul, South Korea; SK T-Brain, Seoul, South Korea", "aff_domain": "sktbrain.com; ; ; ; ", "email": "sktbrain.com; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/kim17a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "SK T-Brain", "aff_unique_dep": "", "aff_unique_url": "", "aff_unique_abbr": "", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Seoul", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "South Korea" }, { "title": "Learning to Discover Sparse Graphical Models", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/706", "id": "706", "author_site": "Eugene Belilovsky, Kyle Kastner, Gael Varoquaux, Matthew B Blaschko", "author": "Eugene Belilovsky; Kyle Kastner; Gael Varoquaux; Matthew B. Blaschko", "abstract": "We consider structure discovery of undirected graphical models from observational data. Inferring likely structures from few examples is a complex task often requiring the formulation of priors and sophisticated inference procedures. Popular methods rely on estimating a penalized maximum likelihood of the precision matrix. However, in these approaches structure recovery is an indirect consequence of the data-fit term, the penalty can be difficult to adapt for domain-specific knowledge, and the inference is computationally demanding. By contrast, it may be easier to generate training samples of data that arise from graphs with the desired structure properties. We propose here to leverage this latter source of information as training data to learn a function, parametrized by a neural network, that maps empirical covariance matrices to estimated graph structures. Learning this function brings two benefits: it implicitly models the desired structure or sparsity properties to form suitable priors, and it can be tailored to the specific problem of edge structure discovery, rather than maximizing data likelihood. Applying this framework, we find our learnable graph-discovery method trained on synthetic data generalizes well: identifying relevant edges in both synthetic and real data, completely unknown at training time. We find that on genetics, brain imaging, and simulation data we obtain performance generally superior to analytical methods.", "bibtex": "@InProceedings{pmlr-v70-belilovsky17a,\n title = \t {Learning to Discover Sparse Graphical Models},\n author = {Eugene Belilovsky and Kyle Kastner and Gael Varoquaux and Matthew B. 
Blaschko},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {440--448},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/belilovsky17a/belilovsky17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/belilovsky17a.html},\n abstract = \t {We consider structure discovery of undirected graphical models from observational data. Inferring likely structures from few examples is a complex task often requiring the formulation of priors and sophisticated inference procedures. Popular methods rely on estimating a penalized maximum likelihood of the precision matrix. However, in these approaches structure recovery is an indirect consequence of the data-fit term, the penalty can be difficult to adapt for domain-specific knowledge, and the inference is computationally demanding. By contrast, it may be easier to generate training samples of data that arise from graphs with the desired structure properties. We propose here to leverage this latter source of information as training data to learn a function, parametrized by a neural network, that maps empirical covariance matrices to estimated graph structures. Learning this function brings two benefits: it implicitly models the desired structure or sparsity properties to form suitable priors, and it can be tailored to the specific problem of edge structure discovery, rather than maximizing data likelihood. Applying this framework, we find our learnable graph-discovery method trained on synthetic data generalizes well: identifying relevant edges in both synthetic and real data, completely unknown at training time. We find that on genetics, brain imaging, and simulation data we obtain performance generally superior to analytical methods.}\n}", "pdf": "http://proceedings.mlr.press/v70/belilovsky17a/belilovsky17a.pdf", "supp": "", "pdf_size": 1545389, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8693460984110187084&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "KU Leuven+University of Paris-Saclay+University of Montreal; University of Montreal; INRIA; KU Leuven", "aff_domain": "inria.fr; ; ; ", "email": "inria.fr; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/belilovsky17a.html", "aff_unique_index": "0+1+2;2;3;0", "aff_unique_norm": "Katholieke Universiteit Leuven;University of Paris-Saclay;University of Montreal;INRIA", "aff_unique_dep": ";;;", "aff_unique_url": "https://www.kuleuven.be;https://www.universite-paris-saclay.fr;https://www.umontreal.ca;https://www.inria.fr", "aff_unique_abbr": "KU Leuven;Paris-Saclay;UM;INRIA", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+1+2;2;1;0", "aff_country_unique": "Belgium;France;Canada" }, { "title": "Learning to Generate Long-term Future via Hierarchical Prediction", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/641", "id": "641", "author_site": "Ruben Villegas, Jimei Yang, Yuliang Zou, Sungryull Sohn, Xunyu Lin, Honglak Lee", "author": "Ruben Villegas; Jimei Yang; Yuliang Zou; Sungryull Sohn; Xunyu Lin; Honglak Lee", "abstract": "We propose a hierarchical approach for making long-term predictions of future frames. 
To avoid inherent compounding errors in recursive pixel-level prediction, we propose to first estimate high-level structure in the input frames, then predict how that structure evolves in the future, and finally by observing a single frame from the past and the predicted high-level structure, we construct the future frames without having to observe any of the pixel-level predictions. Long-term video prediction is difficult to perform by recurrently observing the predicted frames because the small errors in pixel space exponentially amplify as predictions are made deeper into the future. Our approach prevents pixel-level error propagation from happening by removing the need to observe the predicted frames. Our model is built with a combination of LSTM and analogy based encoder-decoder convolutional neural networks, which independently predict the video structure and generate the future frames, respectively. In experiments, our model is evaluated on the Human3.6M and Penn Action datasets on the task of long-term pixel-level video prediction of humans performing actions and demonstrate significantly better results than the state-of-the-art.", "bibtex": "@InProceedings{pmlr-v70-villegas17a,\n title = \t {Learning to Generate Long-term Future via Hierarchical Prediction},\n author = {Ruben Villegas and Jimei Yang and Yuliang Zou and Sungryull Sohn and Xunyu Lin and Honglak Lee},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3560--3569},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/villegas17a/villegas17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/villegas17a.html},\n abstract = \t {We propose a hierarchical approach for making long-term predictions of future frames. To avoid inherent compounding errors in recursive pixel-level prediction, we propose to first estimate high-level structure in the input frames, then predict how that structure evolves in the future, and finally by observing a single frame from the past and the predicted high-level structure, we construct the future frames without having to observe any of the pixel-level predictions. Long-term video prediction is difficult to perform by recurrently observing the predicted frames because the small errors in pixel space exponentially amplify as predictions are made deeper into the future. Our approach prevents pixel-level error propagation from happening by removing the need to observe the predicted frames. Our model is built with a combination of LSTM and analogy based encoder-decoder convolutional neural networks, which independently predict the video structure and generate the future frames, respectively. 
In experiments, our model is evaluated on the Human3.6M and Penn Action datasets on the task of long-term pixel-level video prediction of humans performing actions and demonstrate significantly better results than the state-of-the-art.}\n}", "pdf": "http://proceedings.mlr.press/v70/villegas17a/villegas17a.pdf", "supp": "", "pdf_size": 794761, "gs_citation": 459, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=307545796405932708&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Electrical Engineering and Computer Science, University of Michigan, Ann Arbor, MI, USA; Adobe Research, San Jose, CA; Beihang University, Beijing, China; Department of Electrical Engineering and Computer Science, University of Michigan, Ann Arbor, MI, USA; Beihang University, Beijing, China; Department of Electrical Engineering and Computer Science, University of Michigan, Ann Arbor, MI, USA + Google Brain, Mountain View, CA", "aff_domain": "umich.edu; ; ; ; ; ", "email": "umich.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/villegas17a.html", "aff_unique_index": "0;1;2;0;2;0+3", "aff_unique_norm": "University of Michigan;Adobe;Beihang University;Google", "aff_unique_dep": "Department of Electrical Engineering and Computer Science;Adobe Research;;Google Brain", "aff_unique_url": "https://www.umich.edu;https://research.adobe.com;http://www.buaa.edu.cn/;https://brain.google.com", "aff_unique_abbr": "UM;Adobe;BUAA;Google Brain", "aff_campus_unique_index": "0;1;2;0;2;0+3", "aff_campus_unique": "Ann Arbor;San Jose;Beijing;Mountain View", "aff_country_unique_index": "0;0;1;0;1;0+0", "aff_country_unique": "United States;China" }, { "title": "Learning to Learn without Gradient Descent by Gradient Descent", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/687", "id": "687", "author_site": "Yutian Chen, Matthew Hoffman, Sergio G\u00f3mez Colmenarejo, Misha Denil, Timothy Lillicrap, Matthew Botvinick, Nando de Freitas", "author": "Yutian Chen; Matthew W. Hoffman; Sergio G\u00f3mez Colmenarejo; Misha Denil; Timothy P. Lillicrap; Matt Botvinick; Nando Freitas", "abstract": "We learn recurrent neural network optimizers trained on simple synthetic functions by gradient descent. We show that these learned optimizers exhibit a remarkable degree of transfer in that they can be used to efficiently optimize a broad range of derivative-free black-box functions, including Gaussian process bandits, simple control objectives, global optimization benchmarks and hyper-parameter tuning tasks. Up to the training horizon, the learned optimizers learn to trade-off exploration and exploitation, and compare favourably with heavily engineered Bayesian optimization packages for hyper-parameter tuning.", "bibtex": "@InProceedings{pmlr-v70-chen17e,\n title = \t {Learning to Learn without Gradient Descent by Gradient Descent},\n author = {Yutian Chen and Matthew W. Hoffman and Sergio G{\\'o}mez Colmenarejo and Misha Denil and Timothy P. 
Lillicrap and Matt Botvinick and Nando de Freitas},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {748--756},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/chen17e/chen17e.pdf},\n url = \t {https://proceedings.mlr.press/v70/chen17e.html},\n abstract = \t {We learn recurrent neural network optimizers trained on simple synthetic functions by gradient descent. We show that these learned optimizers exhibit a remarkable degree of transfer in that they can be used to efficiently optimize a broad range of derivative-free black-box functions, including Gaussian process bandits, simple control objectives, global optimization benchmarks and hyper-parameter tuning tasks. Up to the training horizon, the learned optimizers learn to trade-off exploration and exploitation, and compare favourably with heavily engineered Bayesian optimization packages for hyper-parameter tuning.}\n}", "pdf": "http://proceedings.mlr.press/v70/chen17e/chen17e.pdf", "supp": "", "pdf_size": 768494, "gs_citation": 345, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6287137833382757427&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind", "aff_domain": "google.com; ; ; ; ; ; ", "email": "google.com; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v70/chen17e.html", "aff_unique_index": "0;0;0;0;0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Leveraging Node Attributes for Incomplete Relational Data", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/555", "id": "555", "author_site": "He Zhao, Lan Du, Wray Buntine", "author": "He Zhao; Lan Du; Wray Buntine", "abstract": "Relational data are usually highly incomplete in practice, which inspires us to leverage side information to improve the performance of community detection and link prediction. This paper presents a Bayesian probabilistic approach that incorporates various kinds of node attributes encoded in binary form in relational models with Poisson likelihood. Our method works flexibly with both directed and undirected relational networks. The inference can be done by efficient Gibbs sampling which leverages sparsity of both networks and node attributes. 
Extensive experiments show that our models achieve the state-of-the-art link prediction results, especially with highly incomplete relational data.", "bibtex": "@InProceedings{pmlr-v70-zhao17a,\n title = \t {Leveraging Node Attributes for Incomplete Relational Data},\n author = {He Zhao and Lan Du and Wray Buntine},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {4072--4081},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zhao17a/zhao17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/zhao17a.html},\n abstract = \t {Relational data are usually highly incomplete in practice, which inspires us to leverage side information to improve the performance of community detection and link prediction. This paper presents a Bayesian probabilistic approach that incorporates various kinds of node attributes encoded in binary form in relational models with Poisson likelihood. Our method works flexibly with both directed and undirected relational networks. The inference can be done by efficient Gibbs sampling which leverages sparsity of both networks and node attributes. Extensive experiments show that our models achieve the state-of-the-art link prediction results, especially with highly incomplete relational data.}\n}", "pdf": "http://proceedings.mlr.press/v70/zhao17a/zhao17a.pdf", "supp": "", "pdf_size": 3678737, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4757909045546681802&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Faculty of Information Technology, Monash University, Australia; Faculty of Information Technology, Monash University, Australia; Faculty of Information Technology, Monash University, Australia", "aff_domain": "monash.edu; ; ", "email": "monash.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/zhao17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Monash University", "aff_unique_dep": "Faculty of Information Technology", "aff_unique_url": "https://www.monash.edu", "aff_unique_abbr": "Monash", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "Australia" }, { "title": "Leveraging Union of Subspace Structure to Improve Constrained Clustering", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/613", "id": "613", "author_site": "John Lipor, Laura Balzano", "author": "John Lipor; Laura Balzano", "abstract": "Many clustering problems in computer vision and other contexts are also classification problems, where each cluster shares a meaningful label. Subspace clustering algorithms in particular are often applied to problems that fit this description, for example with face images or handwritten digits. While it is straightforward to request human input on these datasets, our goal is to reduce this input as much as possible. We present a pairwise-constrained clustering algorithm that actively selects queries based on the union-of-subspaces model. The central step of the algorithm is in querying points of minimum margin between estimated subspaces; analogous to classifier margin, these lie near the decision boundary. We prove that points lying near the intersection of subspaces are points with low margin. 
Our procedure can be used after any subspace clustering algorithm that outputs an affinity matrix. We demonstrate on several datasets that our algorithm drives the clustering error down considerably faster than the state-of-the-art active query algorithms on datasets with subspace structure and is competitive on other datasets.", "bibtex": "@InProceedings{pmlr-v70-lipor17a,\n title = \t {Leveraging Union of Subspace Structure to Improve Constrained Clustering},\n author = {John Lipor and Laura Balzano},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2130--2139},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/lipor17a/lipor17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/lipor17a.html},\n abstract = \t {Many clustering problems in computer vision and other contexts are also classification problems, where each cluster shares a meaningful label. Subspace clustering algorithms in particular are often applied to problems that fit this description, for example with face images or handwritten digits. While it is straightforward to request human input on these datasets, our goal is to reduce this input as much as possible. We present a pairwise-constrained clustering algorithm that actively selects queries based on the union-of-subspaces model. The central step of the algorithm is in querying points of minimum margin between estimated subspaces; analogous to classifier margin, these lie near the decision boundary. We prove that points lying near the intersection of subspaces are points with low margin. Our procedure can be used after any subspace clustering algorithm that outputs an affinity matrix. We demonstrate on several datasets that our algorithm drives the clustering error down considerably faster than the state-of-the-art active query algorithms on datasets with subspace structure and is competitive on other datasets.}\n}", "pdf": "http://proceedings.mlr.press/v70/lipor17a/lipor17a.pdf", "supp": "", "pdf_size": 698105, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18263303392250204785&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Electrical and Computer Engineering, University of Michigan, Ann Arbor, MI, USA; Department of Electrical and Computer Engineering, University of Michigan, Ann Arbor, MI, USA", "aff_domain": "umich.edu; ", "email": "umich.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/lipor17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Michigan", "aff_unique_dep": "Department of Electrical and Computer Engineering", "aff_unique_url": "https://www.umich.edu", "aff_unique_abbr": "UM", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Ann Arbor", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Local Bayesian Optimization of Motor Skills", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/748", "id": "748", "author_site": "Riadh Akrour, Dmitry Sorokin, Jan Peters, Gerhard Neumann", "author": "Riad Akrour; Dmitry Sorokin; Jan Peters; Gerhard Neumann", "abstract": "Bayesian optimization is renowned for its sample efficiency but its application to higher dimensional tasks is impeded by its focus on global optimization. 
To scale to higher dimensional problems, we leverage the sample efficiency of Bayesian optimization in a local context. The optimization of the acquisition function is restricted to the vicinity of a Gaussian search distribution which is moved towards high value areas of the objective. The proposed information-theoretic update of the search distribution results in a Bayesian interpretation of local stochastic search: the search distribution encodes prior knowledge on the optimum\u2019s location and is weighted at each iteration by the likelihood of this location\u2019s optimality. We demonstrate the effectiveness of our algorithm on several benchmark objective functions as well as a continuous robotic task in which an informative prior is obtained by imitation learning.", "bibtex": "@InProceedings{pmlr-v70-akrour17a,\n title = \t {Local {B}ayesian Optimization of Motor Skills},\n author = {Riad Akrour and Dmitry Sorokin and Jan Peters and Gerhard Neumann},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {41--50},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/akrour17a/akrour17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/akrour17a.html},\n abstract = \t {Bayesian optimization is renowned for its sample efficiency but its application to higher dimensional tasks is impeded by its focus on global optimization. To scale to higher dimensional problems, we leverage the sample efficiency of Bayesian optimization in a local context. The optimization of the acquisition function is restricted to the vicinity of a Gaussian search distribution which is moved towards high value areas of the objective. The proposed information-theoretic update of the search distribution results in a Bayesian interpretation of local stochastic search: the search distribution encodes prior knowledge on the optimum\u2019s location and is weighted at each iteration by the likelihood of this location\u2019s optimality. 
We demonstrate the effectiveness of our algorithm on several benchmark objective functions as well as a continuous robotic task in which an informative prior is obtained by imitation learning.}\n}", "pdf": "http://proceedings.mlr.press/v70/akrour17a/akrour17a.pdf", "supp": "", "pdf_size": 1238643, "gs_citation": 26, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17345936751573839904&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "CLAS/IAS, TU Darmstadt, Darmstadt, Germany+Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany+LCAS, University of Lincoln, Lincoln, United Kingdom; CLAS/IAS, TU Darmstadt, Darmstadt, Germany+Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany+LCAS, University of Lincoln, Lincoln, United Kingdom; CLAS/IAS, TU Darmstadt, Darmstadt, Germany+Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany+LCAS, University of Lincoln, Lincoln, United Kingdom; CLAS/IAS, TU Darmstadt, Darmstadt, Germany+Max Planck Institute for Intelligent Systems, T\u00fcbingen, Germany+LCAS, University of Lincoln, Lincoln, United Kingdom", "aff_domain": "robot-learning.de; ; ; ", "email": "robot-learning.de; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/akrour17a.html", "aff_unique_index": "0+1+2;0+1+2;0+1+2;0+1+2", "aff_unique_norm": "Technische Universit\u00e4t Darmstadt;Max Planck Institute for Intelligent Systems;University of Lincoln", "aff_unique_dep": "CLAS/IAS;;LCAS", "aff_unique_url": "https://www.tu-darmstadt.de;https://www.mpi-is.mpg.de;https://www.lincoln.ac.uk", "aff_unique_abbr": "TU Darmstadt;MPI-IS;", "aff_campus_unique_index": "0+1+2;0+1+2;0+1+2;0+1+2", "aff_campus_unique": "Darmstadt;T\u00fcbingen;Lincoln", "aff_country_unique_index": "0+0+1;0+0+1;0+0+1;0+0+1", "aff_country_unique": "Germany;United Kingdom" }, { "title": "Local-to-Global Bayesian Network Structure Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/688", "id": "688", "author_site": "Tian Gao, Kshitij Fadnis, Murray Campbell", "author": "Tian Gao; Kshitij Fadnis; Murray Campbell", "abstract": "We introduce a new local-to-global structure learning algorithm, called graph growing structure learning (GGSL), to learn Bayesian network (BN) structures. GGSL starts at a (random) node and then gradually expands the learned structure through a series of local learning steps. At each local learning step, the proposed algorithm only needs to revisit a subset of the learned nodes, consisting of the local neighborhood of a target, and therefore improves on both memory and time efficiency compared to traditional global structure learning approaches. GGSL also improves on the existing local-to-global learning approaches by removing the need for conflict-resolving AND-rules, and achieves better learning accuracy. We provide theoretical analysis for the local learning step, and show that GGSL outperforms existing algorithms on benchmark datasets. 
Overall, GGSL demonstrates a novel direction to scale up BN structure learning while limiting accuracy loss.", "bibtex": "@InProceedings{pmlr-v70-gao17a,\n title = \t {Local-to-Global {B}ayesian Network Structure Learning},\n author = {Tian Gao and Kshitij Fadnis and Murray Campbell},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1193--1202},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/gao17a/gao17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/gao17a.html},\n abstract = \t {We introduce a new local-to-global structure learning algorithm, called graph growing structure learning (GGSL), to learn Bayesian network (BN) structures. GGSL starts at a (random) node and then gradually expands the learned structure through a series of local learning steps. At each local learning step, the proposed algorithm only needs to revisit a subset of the learned nodes, consisting of the local neighborhood of a target, and therefore improves on both memory and time efficiency compared to traditional global structure learning approaches. GGSL also improves on the existing local-to-global learning approaches by removing the need for conflict-resolving AND-rules, and achieves better learning accuracy. We provide theoretical analysis for the local learning step, and show that GGSL outperforms existing algorithms on benchmark datasets. Overall, GGSL demonstrates a novel direction to scale up BN structure learning while limiting accuracy loss.}\n}", "pdf": "http://proceedings.mlr.press/v70/gao17a/gao17a.pdf", "supp": "", "pdf_size": 553547, "gs_citation": 58, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6633817073949985325&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "IBM Thomas J. Watson Research Center; IBM Thomas J. Watson Research Center; IBM Thomas J. Watson Research Center", "aff_domain": "us.ibm.com; ; ", "email": "us.ibm.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/gao17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "IBM", "aff_unique_dep": "Research", "aff_unique_url": "https://www.ibm.com/research", "aff_unique_abbr": "IBM", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Yorktown Heights", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Logarithmic Time One-Against-Some", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/617", "id": "617", "author_site": "Hal Daum\u00e9, Nikos Karampatziakis, John Langford, Paul Mineiro", "author": "Hal Daum\u00e9 III; Nikos Karampatziakis; John Langford; Paul Mineiro", "abstract": "We create a new online reduction of multiclass classification to binary classification for which training and prediction time scale logarithmically with the number of classes. We show that several simple techniques give rise to an algorithm which is superior to previous logarithmic time classification approaches while competing with one-against-all in space. 
The core construction is based on using a tree to select a small subset of labels with high recall, which are then scored using a one-against-some structure with high precision.", "bibtex": "@InProceedings{pmlr-v70-daume17a,\n title = \t {Logarithmic Time One-Against-Some},\n author = {Daum{\\'e}, III, Hal and Nikos Karampatziakis and John Langford and Paul Mineiro},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {923--932},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/daume17a/daume17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/daume17a.html},\n abstract = \t {We create a new online reduction of multiclass classification to binary classification for which training and prediction time scale logarithmically with the number of classes. We show that several simple techniques give rise to an algorithm which is superior to previous logarithmic time classification approaches while competing with one-against-all in space. The core construction is based on using a tree to select a small subset of labels with high recall, which are then scored using a one-against-some structure with high precision.}\n}", "pdf": "http://proceedings.mlr.press/v70/daume17a/daume17a.pdf", "supp": "", "pdf_size": 394738, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13792503468575932686&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "University of Maryland; Microsoft; Microsoft; Microsoft", "aff_domain": "microsoft.com; ; ; ", "email": "microsoft.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/daume17a.html", "aff_unique_index": "0;1;1;1", "aff_unique_norm": "University of Maryland;Microsoft", "aff_unique_dep": ";Microsoft Corporation", "aff_unique_url": "https://www.umd.edu;https://www.microsoft.com", "aff_unique_abbr": "UMD;Microsoft", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Lost Relatives of the Gumbel Trick", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/463", "id": "463", "author_site": "Matej Balog, Nilesh Tripuraneni, Zoubin Ghahramani, Adrian Weller", "author": "Matej Balog; Nilesh Tripuraneni; Zoubin Ghahramani; Adrian Weller", "abstract": "The Gumbel trick is a method to sample from a discrete probability distribution, or to estimate its normalizing partition function. The method relies on repeatedly applying a random perturbation to the distribution in a particular way, each time solving for the most likely configuration. We derive an entire family of related methods, of which the Gumbel trick is one member, and show that the new methods have superior properties in several settings with minimal additional computational cost. In particular, for the Gumbel trick to yield computational benefits for discrete graphical models, Gumbel perturbations on all configurations are typically replaced with so-called low-rank perturbations. We show how a subfamily of our new methods adapts to this setting, proving new upper and lower bounds on the log partition function and deriving a family of sequential samplers for the Gibbs distribution. 
Finally, we balance the discussion by showing how the simpler analytical form of the Gumbel trick enables additional theoretical results.", "bibtex": "@InProceedings{pmlr-v70-balog17a,\n title = \t {Lost Relatives of the {G}umbel Trick},\n author = {Matej Balog and Nilesh Tripuraneni and Zoubin Ghahramani and Adrian Weller},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {371--379},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/balog17a/balog17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/balog17a.html},\n abstract = \t {The Gumbel trick is a method to sample from a discrete probability distribution, or to estimate its normalizing partition function. The method relies on repeatedly applying a random perturbation to the distribution in a particular way, each time solving for the most likely configuration. We derive an entire family of related methods, of which the Gumbel trick is one member, and show that the new methods have superior properties in several settings with minimal additional computational cost. In particular, for the Gumbel trick to yield computational benefits for discrete graphical models, Gumbel perturbations on all configurations are typically replaced with so-called low-rank perturbations. We show how a subfamily of our new methods adapts to this setting, proving new upper and lower bounds on the log partition function and deriving a family of sequential samplers for the Gibbs distribution. Finally, we balance the discussion by showing how the simpler analytical form of the Gumbel trick enables additional theoretical results.}\n}", "pdf": "http://proceedings.mlr.press/v70/balog17a/balog17a.pdf", "supp": "", "pdf_size": 914304, "gs_citation": 37, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10880718622370900065&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": "University of Cambridge, UK+MPI-IS, T\u00fcbingen, Germany; UC Berkeley, USA; University of Cambridge, UK+Uber AI Labs, USA; University of Cambridge, UK+Alan Turing Institute, UK", "aff_domain": "gmail.com; ; ; ", "email": "gmail.com; ; ; ", "github": "https://github.com/matejbalog/gumbel-relatives", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/balog17a.html", "aff_unique_index": "0+1;2;0+3;0+4", "aff_unique_norm": "University of Cambridge;Max Planck Institute for Intelligent Systems;University of California, Berkeley;Uber;Alan Turing Institute", "aff_unique_dep": ";;;Uber AI Labs;", "aff_unique_url": "https://www.cam.ac.uk;https://www.mpituebingen.mpg.de;https://www.berkeley.edu;https://www.uber.com;https://www.turing.ac.uk", "aff_unique_abbr": "Cambridge;MPI-IS;UC Berkeley;Uber;ATI", "aff_campus_unique_index": "0+1;2;0;0", "aff_campus_unique": "Cambridge;T\u00fcbingen;Berkeley;", "aff_country_unique_index": "0+1;2;0+2;0+0", "aff_country_unique": "United Kingdom;Germany;United States" }, { "title": "MEC: Memory-efficient Convolution for Deep Neural Network", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/568", "id": "568", "author_site": "Minsik Cho, Daniel Brand", "author": "Minsik Cho; Daniel Brand", "abstract": "Convolution is a critical component in modern deep neural networks, thus several algorithms for convolution have been developed. 
Direct convolution is simple but suffers from poor performance. As an alternative, multiple indirect methods have been proposed including im2col-based convolution, FFT-based convolution, or Winograd-based algorithm. However, all these indirect methods have high memory overhead, which creates performance degradation and offers a poor trade-off between performance and memory consumption. In this work, we propose a memory-efficient convolution or MEC with compact lowering, which reduces memory overhead substantially and accelerates convolution process. MEC lowers the input matrix in a simple yet efficient/compact way (i.e., much less memory overhead), and then executes multiple small matrix multiplications in parallel to get convolution completed. Additionally, the reduced memory footprint improves memory sub-system efficiency, improving performance. Our experimental results show that MEC reduces memory consumption significantly with good speedup on both mobile and server platforms, compared with other indirect convolution algorithms.", "bibtex": "@InProceedings{pmlr-v70-cho17a,\n title = \t {{MEC}: Memory-efficient Convolution for Deep Neural Network},\n author = {Minsik Cho and Daniel Brand},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {815--824},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/cho17a/cho17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/cho17a.html},\n abstract = \t {Convolution is a critical component in modern deep neural networks, thus several algorithms for convolution have been developed. Direct convolution is simple but suffers from poor performance. As an alternative, multiple indirect methods have been proposed including im2col-based convolution, FFT-based convolution, or Winograd-based algorithm. However, all these indirect methods have high memory overhead, which creates performance degradation and offers a poor trade-off between performance and memory consumption. In this work, we propose a memory-efficient convolution or MEC with compact lowering, which reduces memory overhead substantially and accelerates convolution process. MEC lowers the input matrix in a simple yet efficient/compact way (i.e., much less memory overhead), and then executes multiple small matrix multiplications in parallel to get convolution completed. Additionally, the reduced memory footprint improves memory sub-system efficiency, improving performance. Our experimental results show that MEC reduces memory consumption significantly with good speedup on both mobile and server platforms, compared with other indirect convolution algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v70/cho17a/cho17a.pdf", "supp": "", "pdf_size": 247143, "gs_citation": 162, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14974077017510733530&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "IBM T. J. Watson Research Center, NY, USA; IBM T. J. Watson Research Center, NY, USA", "aff_domain": "us.ibm.com; ", "email": "us.ibm.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/cho17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "IBM", "aff_unique_dep": "IBM T. J. 
Watson Research Center", "aff_unique_url": "https://www.ibm.com/research/watson", "aff_unique_abbr": "IBM Watson", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Yorktown Heights", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Magnetic Hamiltonian Monte Carlo", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/508", "id": "508", "author_site": "Nilesh Tripuraneni, Mark Rowland, Zoubin Ghahramani, Richard E Turner", "author": "Nilesh Tripuraneni; Mark Rowland; Zoubin Ghahramani; Richard Turner", "abstract": "Hamiltonian Monte Carlo (HMC) exploits Hamiltonian dynamics to construct efficient proposals for Markov chain Monte Carlo (MCMC). In this paper, we present a generalization of HMC which exploits non-canonical Hamiltonian dynamics. We refer to this algorithm as magnetic HMC, since in 3 dimensions a subset of the dynamics map onto the mechanics of a charged particle coupled to a magnetic field. We establish a theoretical basis for the use of non-canonical Hamiltonian dynamics in MCMC, and construct a symplectic, leapfrog-like integrator allowing for the implementation of magnetic HMC. Finally, we exhibit several examples where these non-canonical dynamics can lead to improved mixing of magnetic HMC relative to ordinary HMC.", "bibtex": "@InProceedings{pmlr-v70-tripuraneni17a,\n title = \t {Magnetic {H}amiltonian {M}onte {C}arlo},\n author = {Nilesh Tripuraneni and Mark Rowland and Zoubin Ghahramani and Richard Turner},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3453--3461},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/tripuraneni17a/tripuraneni17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/tripuraneni17a.html},\n abstract = \t {Hamiltonian Monte Carlo (HMC) exploits Hamiltonian dynamics to construct efficient proposals for Markov chain Monte Carlo (MCMC). In this paper, we present a generalization of HMC which exploits non-canonical Hamiltonian dynamics. We refer to this algorithm as magnetic HMC, since in 3 dimensions a subset of the dynamics map onto the mechanics of a charged particle coupled to a magnetic field. We establish a theoretical basis for the use of non-canonical Hamiltonian dynamics in MCMC, and construct a symplectic, leapfrog-like integrator allowing for the implementation of magnetic HMC. 
Finally, we exhibit several examples where these non-canonical dynamics can lead to improved mixing of magnetic HMC relative to ordinary HMC.}\n}", "pdf": "http://proceedings.mlr.press/v70/tripuraneni17a/tripuraneni17a.pdf", "supp": "", "pdf_size": 1659611, "gs_citation": 47, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4742819079003749484&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": ";;;", "aff_domain": ";;;", "email": ";;;", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/tripuraneni17a.html" }, { "title": "Max-value Entropy Search for Efficient Bayesian Optimization", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/630", "id": "630", "author_site": "Zi Wang, Stefanie Jegelka", "author": "Zi Wang; Stefanie Jegelka", "abstract": "Entropy Search (ES) and Predictive Entropy Search (PES) are popular and empirically successful Bayesian Optimization techniques. Both rely on a compelling information-theoretic motivation, and maximize the information gained about the $\\arg\\max$ of the unknown function; yet, both are plagued by the expensive computation for estimating entropies. We propose a new criterion, Max-value Entropy Search (MES), that instead uses the information about the maximum function value. We show relations of MES to other Bayesian optimization methods, and establish a regret bound. We observe that MES maintains or improves the good empirical performance of ES/PES, while tremendously lightening the computational burden. In particular, MES is much more robust to the number of samples used for computing the entropy, and hence more efficient for higher dimensional problems.", "bibtex": "@InProceedings{pmlr-v70-wang17e,\n title = \t {Max-value Entropy Search for Efficient {B}ayesian Optimization},\n author = {Zi Wang and Stefanie Jegelka},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3627--3635},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/wang17e/wang17e.pdf},\n url = \t {https://proceedings.mlr.press/v70/wang17e.html},\n abstract = \t {Entropy Search (ES) and Predictive Entropy Search (PES) are popular and empirically successful Bayesian Optimization techniques. Both rely on a compelling information-theoretic motivation, and maximize the information gained about the $\\arg\\max$ of the unknown function; yet, both are plagued by the expensive computation for estimating entropies. We propose a new criterion, Max-value Entropy Search (MES), that instead uses the information about the maximum function value. We show relations of MES to other Bayesian optimization methods, and establish a regret bound. We observe that MES maintains or improves the good empirical performance of ES/PES, while tremendously lightening the computational burden. 
In particular, MES is much more robust to the number of samples used for computing the entropy, and hence more efficient for higher dimensional problems.}\n}", "pdf": "http://proceedings.mlr.press/v70/wang17e/wang17e.pdf", "supp": "", "pdf_size": 647563, "gs_citation": 552, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1105583789232319207&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Computer Science and Arti\ufb01cial Intelligence Laboratory, Massachusetts Institute of Technology, Massachusetts, USA; Computer Science and Arti\ufb01cial Intelligence Laboratory, Massachusetts Institute of Technology, Massachusetts, USA", "aff_domain": "csail.mit.edu;csail.mit.edu", "email": "csail.mit.edu;csail.mit.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/wang17e.html", "aff_unique_index": "0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Computer Science and Arti\ufb01cial Intelligence Laboratory", "aff_unique_url": "https://www.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Massachusetts", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Maximum Selection and Ranking under Noisy Comparisons", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/881", "id": "881", "author_site": "Moein Falahatgar, Alon Orlitsky, Venkatadheeraj Pichapati, Ananda Theertha Suresh", "author": "Moein Falahatgar; Alon Orlitsky; Venkatadheeraj Pichapati; Ananda Theertha Suresh", "abstract": "We consider $(\\epsilon,\\delta)$-PAC maximum-selection and ranking using pairwise comparisons for general probabilistic models whose comparison probabilities satisfy strong stochastic transitivity and stochastic triangle inequality. Modifying the popular knockout tournament, we propose a simple maximum-selection algorithm that uses $\\mathcal{O}\\left(\\frac{n}{\\epsilon^2} \\left(1+\\log \\frac1{\\delta}\\right)\\right)$ comparisons, optimal up to a constant factor. We then derive a general framework that uses noisy binary search to speed up many ranking algorithms, and combine it with merge sort to obtain a ranking algorithm that uses $\\mathcal{O}\\left(\\frac n{\\epsilon^2}\\log n(\\log \\log n)^3\\right)$ comparisons for $\\delta=\\frac1n$, optimal up to a $(\\log \\log n)^3$ factor.", "bibtex": "@InProceedings{pmlr-v70-falahatgar17a,\n title = \t {Maximum Selection and Ranking under Noisy Comparisons},\n author = {Moein Falahatgar and Alon Orlitsky and Venkatadheeraj Pichapati and Ananda Theertha Suresh},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1088--1096},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/falahatgar17a/falahatgar17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/falahatgar17a.html},\n abstract = \t {We consider $(\\epsilon,\\delta)$-PAC maximum-selection and ranking using pairwise comparisons for general probabilistic models whose comparison probabilities satisfy strong stochastic transitivity and stochastic triangle inequality. 
Modifying the popular knockout tournament, we propose a simple maximum-selection algorithm that uses $\\mathcal{O}\\left(\\frac{n}{\\epsilon^2} \\left(1+\\log \\frac1{\\delta}\\right)\\right)$ comparisons, optimal up to a constant factor. We then derive a general framework that uses noisy binary search to speed up many ranking algorithms, and combine it with merge sort to obtain a ranking algorithm that uses $\\mathcal{O}\\left(\\frac n{\\epsilon^2}\\log n(\\log \\log n)^3\\right)$ comparisons for $\\delta=\\frac1n$, optimal up to a $(\\log \\log n)^3$ factor.}\n}", "pdf": "http://proceedings.mlr.press/v70/falahatgar17a/falahatgar17a.pdf", "supp": "", "pdf_size": 459113, "gs_citation": 73, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2589895315949422578&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "University of California, San Diego; University of California, San Diego; University of California, San Diego; Google Research", "aff_domain": "ucsd.edu; ; ; ", "email": "ucsd.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/falahatgar17a.html", "aff_unique_index": "0;0;0;1", "aff_unique_norm": "University of California, San Diego;Google", "aff_unique_dep": ";Google Research", "aff_unique_url": "https://www.ucsd.edu;https://research.google", "aff_unique_abbr": "UCSD;Google Research", "aff_campus_unique_index": "0;0;0;1", "aff_campus_unique": "San Diego;Mountain View", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "McGan: Mean and Covariance Feature Matching GAN", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/705", "id": "705", "author_site": "Youssef Mroueh, Tom Sercu, Vaibhava Goel", "author": "Youssef Mroueh; Tom Sercu; Vaibhava Goel", "abstract": "We introduce new families of Integral Probability Metrics (IPM) for training Generative Adversarial Networks (GAN). Our IPMs are based on matching statistics of distributions embedded in a finite dimensional feature space. Mean and covariance feature matching IPMs allow for stable training of GANs, which we will call McGan. McGan minimizes a meaningful loss between distributions.", "bibtex": "@InProceedings{pmlr-v70-mroueh17a,\n title = \t {{M}c{G}an: Mean and Covariance Feature Matching {GAN}},\n author = {Youssef Mroueh and Tom Sercu and Vaibhava Goel},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2527--2535},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/mroueh17a/mroueh17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/mroueh17a.html},\n abstract = \t {We introduce new families of Integral Probability Metrics (IPM) for training Generative Adversarial Networks (GAN). Our IPMs are based on matching statistics of distributions embedded in a finite dimensional feature space. Mean and covariance feature matching IPMs allow for stable training of GANs, which we will call McGan. McGan minimizes a meaningful loss between distributions.}\n}", "pdf": "http://proceedings.mlr.press/v70/mroueh17a/mroueh17a.pdf", "supp": "", "pdf_size": 3582409, "gs_citation": 177, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6080524162434255745&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "AI Foundations. IBM T.J. 
Watson Research Center, NY, USA+Watson Multimodal Algorithms and Engines Group. IBM T.J. Watson Research Center, NY, USA; AI Foundations. IBM T.J. Watson Research Center, NY, USA+Watson Multimodal Algorithms and Engines Group. IBM T.J. Watson Research Center, NY, USA; Watson Multimodal Algorithms and Engines Group. IBM T.J. Watson Research Center, NY, USA", "aff_domain": "us.ibm.com; ; ", "email": "us.ibm.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/mroueh17a.html", "aff_unique_index": "0+0;0+0;0", "aff_unique_norm": "IBM", "aff_unique_dep": "AI Foundations", "aff_unique_url": "https://www.ibm.com/research/watson", "aff_unique_abbr": "IBM Watson", "aff_campus_unique_index": "0+0;0+0;0", "aff_campus_unique": "NY", "aff_country_unique_index": "0+0;0+0;0", "aff_country_unique": "United States" }, { "title": "Measuring Sample Quality with Kernels", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/526", "id": "526", "author_site": "Jackson Gorham, Lester Mackey", "author": "Jackson Gorham; Lester Mackey", "abstract": "Approximate Markov chain Monte Carlo (MCMC) offers the promise of more rapid sampling at the cost of more biased inference. Since standard MCMC diagnostics fail to detect these biases, researchers have developed computable Stein discrepancy measures that provably determine the convergence of a sample to its target distribution. This approach was recently combined with the theory of reproducing kernels to define a closed-form kernel Stein discrepancy (KSD) computable by summing kernel evaluations across pairs of sample points. We develop a theory of weak convergence for KSDs based on Stein\u2019s method, demonstrate that commonly used KSDs fail to detect non-convergence even for Gaussian targets, and show that kernels with slowly decaying tails provably determine convergence for a large class of target distributions. The resulting convergence-determining KSDs are suitable for comparing biased, exact, and deterministic sample sequences and simpler to compute and parallelize than alternative Stein discrepancies. We use our tools to compare biased samplers, select sampler hyperparameters, and improve upon existing KSD approaches to one-sample hypothesis testing and sample quality improvement.", "bibtex": "@InProceedings{pmlr-v70-gorham17a,\n title = \t {Measuring Sample Quality with Kernels},\n author = {Jackson Gorham and Lester Mackey},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1292--1301},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/gorham17a/gorham17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/gorham17a.html},\n abstract = \t {Approximate Markov chain Monte Carlo (MCMC) offers the promise of more rapid sampling at the cost of more biased inference. Since standard MCMC diagnostics fail to detect these biases, researchers have developed computable Stein discrepancy measures that provably determine the convergence of a sample to its target distribution. This approach was recently combined with the theory of reproducing kernels to define a closed-form kernel Stein discrepancy (KSD) computable by summing kernel evaluations across pairs of sample points. 
We develop a theory of weak convergence for KSDs based on Stein\u2019s method, demonstrate that commonly used KSDs fail to detect non-convergence even for Gaussian targets, and show that kernels with slowly decaying tails provably determine convergence for a large class of target distributions. The resulting convergence-determining KSDs are suitable for comparing biased, exact, and deterministic sample sequences and simpler to compute and parallelize than alternative Stein discrepancies. We use our tools to compare biased samplers, select sampler hyperparameters, and improve upon existing KSD approaches to one-sample hypothesis testing and sample quality improvement.}\n}", "pdf": "http://proceedings.mlr.press/v70/gorham17a/gorham17a.pdf", "supp": "", "pdf_size": 943987, "gs_citation": 268, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17514039426076755054&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Stanford University, Palo Alto, CA USA; Microsoft Research New England, Cambridge, MA USA", "aff_domain": "stanford.edu;microsoft.com", "email": "stanford.edu;microsoft.com", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/gorham17a.html", "aff_unique_index": "0;1", "aff_unique_norm": "Stanford University;Microsoft", "aff_unique_dep": ";Microsoft Research New England", "aff_unique_url": "https://www.stanford.edu;https://www.microsoft.com/en-us/research/group/new-england", "aff_unique_abbr": "Stanford;MSR NE", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Palo Alto;Cambridge", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Meritocratic Fairness for Cross-Population Selection", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/744", "id": "744", "author_site": "Michael Kearns, Aaron Roth, Steven Wu", "author": "Michael Kearns; Aaron Roth; Zhiwei Steven Wu", "abstract": "We consider the problem of selecting a strong pool of individuals from several populations with incomparable skills (e.g. soccer players, mathematicians, and singers) in a fair manner. The quality of an individual is defined to be their relative rank (by cumulative distribution value) within their own population, which permits cross-population comparisons. We study algorithms which attempt to select the highest quality subset despite the fact that true CDF values are not known, and can only be estimated from the finite pool of candidates. Specifically, we quantify the regret in quality imposed by \u201cmeritocratic\u201d notions of fairness, which require that individuals are selected with probability that is monotonically increasing in their true quality. We give algorithms with provable fairness and regret guarantees, as well as lower bounds, and provide empirical results which suggest that our algorithms perform better than the theory suggests. 
We extend our results to a sequential batch setting, in which an algorithm must repeatedly select subsets of individuals from new pools of applicants, but has the benefit of being able to compare them to the accumulated data from previous rounds.", "bibtex": "@InProceedings{pmlr-v70-kearns17a,\n title = \t {Meritocratic Fairness for Cross-Population Selection},\n author = {Michael Kearns and Aaron Roth and Zhiwei Steven Wu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1828--1836},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/kearns17a/kearns17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/kearns17a.html},\n abstract = \t {We consider the problem of selecting a strong pool of individuals from several populations with incomparable skills (e.g. soccer players, mathematicians, and singers) in a fair manner. The quality of an individual is defined to be their relative rank (by cumulative distribution value) within their own population, which permits cross-population comparisons. We study algorithms which attempt to select the highest quality subset despite the fact that true CDF values are not known, and can only be estimated from the finite pool of candidates. Specifically, we quantify the regret in quality imposed by \u201cmeritocratic\u201d notions of fairness, which require that individuals are selected with probability that is monotonically increasing in their true quality. We give algorithms with provable fairness and regret guarantees, as well as lower bounds, and provide empirical results which suggest that our algorithms perform better than the theory suggests. We extend our results to a sequential batch setting, in which an algorithm must repeatedly select subsets of individuals from new pools of applicants, but has the benefit of being able to compare them to the accumulated data from previous rounds.}\n}", "pdf": "http://proceedings.mlr.press/v70/kearns17a/kearns17a.pdf", "supp": "", "pdf_size": 377798, "gs_citation": 73, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1701287205757957846&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "University of Pennsylvania; University of Pennsylvania; University of Pennsylvania", "aff_domain": "gmail.com; ; ", "email": "gmail.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/kearns17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Pennsylvania", "aff_unique_dep": "", "aff_unique_url": "https://www.upenn.edu", "aff_unique_abbr": "UPenn", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Meta Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/702", "id": "702", "author_site": "Tsendsuren Munkhdalai, Hong Yu", "author": "Tsendsuren Munkhdalai; Hong Yu", "abstract": "Neural networks have been successfully applied in applications with a large amount of labeled data. However, the task of rapid generalization on new concepts with small training data while preserving performances on previously learned ones still presents a significant challenge to neural network models. 
In this work, we introduce a novel meta learning method, Meta Networks (MetaNet), that learns a meta-level knowledge across tasks and shifts its inductive biases via fast parameterization for rapid generalization. When evaluated on Omniglot and Mini-ImageNet benchmarks, our MetaNet models achieve a near human-level performance and outperform the baseline approaches by up to 6\\% accuracy. We demonstrate several appealing properties of MetaNet relating to generalization and continual learning.", "bibtex": "@InProceedings{pmlr-v70-munkhdalai17a,\n title = \t {Meta Networks},\n author = {Tsendsuren Munkhdalai and Hong Yu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2554--2563},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/munkhdalai17a/munkhdalai17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/munkhdalai17a.html},\n abstract = \t {Neural networks have been successfully applied in applications with a large amount of labeled data. However, the task of rapid generalization on new concepts with small training data while preserving performances on previously learned ones still presents a significant challenge to neural network models. In this work, we introduce a novel meta learning method, Meta Networks (MetaNet), that learns a meta-level knowledge across tasks and shifts its inductive biases via fast parameterization for rapid generalization. When evaluated on Omniglot and Mini-ImageNet benchmarks, our MetaNet models achieve a near human-level performance and outperform the baseline approaches by up to 6\\% accuracy. We demonstrate several appealing properties of MetaNet relating to generalization and continual learning.}\n}", "pdf": "http://proceedings.mlr.press/v70/munkhdalai17a/munkhdalai17a.pdf", "supp": "", "pdf_size": 416056, "gs_citation": 1399, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=373430955020553964&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "University of Massachusetts, MA, USA; University of Massachusetts, MA, USA", "aff_domain": "umassmed.edu; ", "email": "umassmed.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/munkhdalai17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Massachusetts", "aff_unique_dep": "", "aff_unique_url": "https://www.umass.edu", "aff_unique_abbr": "UMass", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Amherst", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Minimax Regret Bounds for Reinforcement Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/774", "id": "774", "author_site": "Mohammad Gheshlaghi Azar, Ian Osband, Remi Munos", "author": "Mohammad Gheshlaghi Azar; Ian Osband; R\u00e9mi Munos", "abstract": "We consider the problem of provably optimal exploration in reinforcement learning for finite horizon MDPs. We show that an optimistic modification to value iteration achieves a regret bound of $\\tilde {O}( \\sqrt{HSAT} + H^2S^2A+H\\sqrt{T})$ where $H$ is the time horizon, $S$ the number of states, $A$ the number of actions and $T$ the number of time-steps. This result improves over the best previous known bound $\\tilde {O}(HS \\sqrt{AT})$ achieved by the UCRL2 algorithm. 
The key significance of our new results is that when $T\\geq H^3S^3A$ and $SA\\geq H$, it leads to a regret of $\\tilde{O}(\\sqrt{HSAT})$ that matches the established lower bound of $\\Omega(\\sqrt{HSAT})$ up to a logarithmic factor. Our analysis contains two key insights. We use careful application of concentration inequalities to the optimal value function as a whole, rather than to the transition probabilities (to improve scaling in $S$), and we define Bernstein-based \u201cexploration bonuses\u201d that use the empirical variance of the estimated values at the next states (to improve scaling in $H$).", "bibtex": "@InProceedings{pmlr-v70-azar17a,\n title = \t {Minimax Regret Bounds for Reinforcement Learning},\n author = {Mohammad Gheshlaghi Azar and Ian Osband and R{\\'e}mi Munos},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {263--272},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/azar17a/azar17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/azar17a.html},\n abstract = \t {We consider the problem of provably optimal exploration in reinforcement learning for finite horizon MDPs. We show that an optimistic modification to value iteration achieves a regret bound of $\\tilde {O}( \\sqrt{HSAT} + H^2S^2A+H\\sqrt{T})$ where $H$ is the time horizon, $S$ the number of states, $A$ the number of actions and $T$ the number of time-steps. This result improves over the best previous known bound $\\tilde {O}(HS \\sqrt{AT})$ achieved by the UCRL2 algorithm. The key significance of our new results is that when $T\\geq H^3S^3A$ and $SA\\geq H$, it leads to a regret of $\\tilde{O}(\\sqrt{HSAT})$ that matches the established lower bound of $\\Omega(\\sqrt{HSAT})$ up to a logarithmic factor. Our analysis contains two key insights. 
We use careful application of concentration inequalities to the optimal value function as a whole, rather than to the transitions probabilities (to improve scaling in $S$), and we define Bernstein-based \u201cexploration bonuses\u201d that use the empirical variance of the estimated values at the next states (to improve scaling in $H$).}\n}", "pdf": "http://proceedings.mlr.press/v70/azar17a/azar17a.pdf", "supp": "", "pdf_size": 414786, "gs_citation": 939, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9016723800402069962&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "DeepMind, London, UK; DeepMind, London, UK; DeepMind, London, UK", "aff_domain": "google.com; ; ", "email": "google.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/azar17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Minimizing Trust Leaks for Robust Sybil Detection", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/669", "id": "669", "author_site": "J\u00e1nos H\u00f6ner, Shinichi Nakajima, Alexander Bauer, Klaus-robert Mueller, Nico G\u00f6rnitz", "author": "J\u00e1nos H\u00f6ner; Shinichi Nakajima; Alexander Bauer; Klaus-Robert M\u00fcller; Nico G\u00f6rnitz", "abstract": "Sybil detection is a crucial task to protect online social networks (OSNs) against intruders who try to manipulate automatic services provided by OSNs to their customers. In this paper, we first discuss the robustness of graph-based Sybil detectors SybilRank and Integro and refine theoretically their security guarantees towards more realistic assumptions. After that, we formally introduce adversarial settings for the graph-based Sybil detection problem and derive a corresponding optimal attacking strategy by exploitation of trust leaks. Based on our analysis, we propose transductive Sybil ranking (TSR), a robust extension to SybilRank and Integro that directly minimizes trust leaks. Our empirical evaluation shows significant advantages of TSR over state-of-the-art competitors on a variety of attacking scenarios on artificially generated data and real-world datasets.", "bibtex": "@InProceedings{pmlr-v70-honer17a,\n title = \t {Minimizing Trust Leaks for Robust {S}ybil Detection},\n author = {J{\\'a}nos H{\\\"o}ner and Shinichi Nakajima and Alexander Bauer and Klaus-Robert M{\\\"u}ller and Nico G{\\\"o}rnitz},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1520--1528},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/honer17a/honer17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/honer17a.html},\n abstract = \t {Sybil detection is a crucial task to protect online social networks (OSNs) against intruders who try to manipulate automatic services provided by OSNs to their customers. In this paper, we first discuss the robustness of graph-based Sybil detectors SybilRank and Integro and refine theoretically their security guarantees towards more realistic assumptions. 
After that, we formally introduce adversarial settings for the graph-based Sybil detection problem and derive a corresponding optimal attacking strategy by exploitation of trust leaks. Based on our analysis, we propose transductive Sybil ranking (TSR), a robust extension to SybilRank and Integro that directly minimizes trust leaks. Our empirical evaluation shows significant advantages of TSR over state-of-the-art competitors on a variety of attacking scenarios on artificially generated data and real-world datasets.}\n}", "pdf": "http://proceedings.mlr.press/v70/honer17a/honer17a.pdf", "supp": "", "pdf_size": 1463096, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=129048646900913651&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "MathPlan; Machine Learning Group, Berlin Institute of Technology + Berlin Big Data Center + Max Planck Society + Korea University; Machine Learning Group, Berlin Institute of Technology + Berlin Big Data Center + Max Planck Society; Machine Learning Group, Berlin Institute of Technology + Berlin Big Data Center + Max Planck Society + Korea University; Machine Learning Group, Berlin Institute of Technology", "aff_domain": "campus.tu-berlin.de; ; ;tu-berlin.de;tu-berlin.de", "email": "campus.tu-berlin.de; ; ;tu-berlin.de;tu-berlin.de", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/honer17a.html", "aff_unique_index": "0;1+2+3+4;1+2+3;1+2+3+4;1", "aff_unique_norm": "MathPlan;Berlin Institute of Technology;Berlin Big Data Center;Max Planck Society;Korea University", "aff_unique_dep": ";Machine Learning Group;;;", "aff_unique_url": ";https://www.tu-berlin.de;https://www.berlinbigdata.de;https://www.mpg.de;https://www.korea.ac.kr", "aff_unique_abbr": ";TU Berlin;;MPG;KU", "aff_campus_unique_index": "1;1;1;1", "aff_campus_unique": ";Berlin", "aff_country_unique_index": "1+1+1+2;1+1+1;1+1+1+2;1", "aff_country_unique": ";Germany;South Korea" }, { "title": "Model-Agnostic Meta-Learning for Fast Adaptation of Deep Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/495", "id": "495", "author_site": "Chelsea Finn, Pieter Abbeel, Sergey Levine", "author": "Chelsea Finn; Pieter Abbeel; Sergey Levine", "abstract": "We propose an algorithm for meta-learning that is model-agnostic, in the sense that it is compatible with any model trained with gradient descent and applicable to a variety of different learning problems, including classification, regression, and reinforcement learning. The goal of meta-learning is to train a model on a variety of learning tasks, such that it can solve new learning tasks using only a small number of training samples. In our approach, the parameters of the model are explicitly trained such that a small number of gradient steps with a small amount of training data from a new task will produce good generalization performance on that task. In effect, our method trains the model to be easy to fine-tune. 
We demonstrate that this approach leads to state-of-the-art performance on two few-shot image classification benchmarks, produces good results on few-shot regression, and accelerates fine-tuning for policy gradient reinforcement learning with neural network policies.", "bibtex": "@InProceedings{pmlr-v70-finn17a,\n title = \t {Model-Agnostic Meta-Learning for Fast Adaptation of Deep Networks},\n author = {Chelsea Finn and Pieter Abbeel and Sergey Levine},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1126--1135},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/finn17a/finn17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/finn17a.html},\n abstract = \t {We propose an algorithm for meta-learning that is model-agnostic, in the sense that it is compatible with any model trained with gradient descent and applicable to a variety of different learning problems, including classification, regression, and reinforcement learning. The goal of meta-learning is to train a model on a variety of learning tasks, such that it can solve new learning tasks using only a small number of training samples. In our approach, the parameters of the model are explicitly trained such that a small number of gradient steps with a small amount of training data from a new task will produce good generalization performance on that task. In effect, our method trains the model to be easy to fine-tune. We demonstrate that this approach leads to state-of-the-art performance on two few-shot image classification benchmarks, produces good results on few-shot regression, and accelerates fine-tuning for policy gradient reinforcement learning with neural network policies.}\n}", "pdf": "http://proceedings.mlr.press/v70/finn17a/finn17a.pdf", "supp": "", "pdf_size": 2784662, "gs_citation": 15661, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17278604844873996878&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 16, "aff": "University of California, Berkeley; University of California, Berkeley + OpenAI; University of California, Berkeley", "aff_domain": "eecs.berkeley.edu; ; ", "email": "eecs.berkeley.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/finn17a.html", "aff_unique_index": "0;0+1;0", "aff_unique_norm": "University of California, Berkeley;OpenAI", "aff_unique_dep": ";", "aff_unique_url": "https://www.berkeley.edu;https://openai.com", "aff_unique_abbr": "UC Berkeley;OpenAI", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Berkeley;", "aff_country_unique_index": "0;0+0;0", "aff_country_unique": "United States" }, { "title": "Model-Independent Online Learning for Influence Maximization", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/639", "id": "639", "author_site": "Sharan Vaswani, Branislav Kveton, Zheng Wen, Mohammad Ghavamzadeh, Laks V.S Lakshmanan, Mark Schmidt", "author": "Sharan Vaswani; Branislav Kveton; Zheng Wen; Mohammad Ghavamzadeh; Laks V. S. Lakshmanan; Mark Schmidt", "abstract": "We consider", "bibtex": "@InProceedings{pmlr-v70-vaswani17a,\n title = \t {Model-Independent Online Learning for Influence Maximization},\n author = {Sharan Vaswani and Branislav Kveton and Zheng Wen and Mohammad Ghavamzadeh and Laks V. S. 
Lakshmanan and Mark Schmidt},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3530--3539},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/vaswani17a/vaswani17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/vaswani17a.html},\n abstract = \t {We consider", "pdf": "http://proceedings.mlr.press/v70/vaswani17a/vaswani17a.pdf", "supp": "", "pdf_size": 1061253, "gs_citation": 76, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10487679220748356123&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "University of British Columbia; Adobe Research; Adobe Research; DeepMind; University of British Columbia; University of British Columbia", "aff_domain": "cs.ubc.ca; ; ; ; ; ", "email": "cs.ubc.ca; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/vaswani17a.html", "aff_unique_index": "0;1;1;2;0;0", "aff_unique_norm": "University of British Columbia;Adobe;DeepMind", "aff_unique_dep": ";Adobe Research;", "aff_unique_url": "https://www.ubc.ca;https://research.adobe.com;https://deepmind.com", "aff_unique_abbr": "UBC;Adobe;DeepMind", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;1;2;0;0", "aff_country_unique": "Canada;United States;United Kingdom" }, { "title": "Modular Multitask Reinforcement Learning with Policy Sketches", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/548", "id": "548", "author_site": "Jacob Andreas, Dan Klein, Sergey Levine", "author": "Jacob Andreas; Dan Klein; Sergey Levine", "abstract": "We describe a framework for multitask deep reinforcement learning guided by policy sketches. Sketches annotate tasks with sequences of named subtasks, providing information about high-level structural relationships among tasks but not how to implement them\u2014specifically not providing the detailed guidance used by much previous work on learning policy abstractions for RL (e.g. intermediate rewards, subtask completion signals, or intrinsic motivations). To learn from sketches, we present a model that associates every subtask with a modular subpolicy, and jointly maximizes reward over full task-specific policies by tying parameters across shared subpolicies. Optimization is accomplished via a decoupled actor\u2013critic training objective that facilitates learning common behaviors from multiple dissimilar reward functions. We evaluate the effectiveness of our approach in three environments featuring both discrete and continuous control, and with sparse rewards that can be obtained only after completing a number of high-level subgoals. 
Experiments show that using our approach to learn policies guided by sketches gives better performance than existing techniques for learning task-specific or shared policies, while naturally inducing a library of interpretable primitive behaviors that can be recombined to rapidly adapt to new tasks.", "bibtex": "@InProceedings{pmlr-v70-andreas17a,\n title = \t {Modular Multitask Reinforcement Learning with Policy Sketches},\n author = {Jacob Andreas and Dan Klein and Sergey Levine},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {166--175},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/andreas17a/andreas17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/andreas17a.html},\n abstract = \t {We describe a framework for multitask deep reinforcement learning guided by policy sketches. Sketches annotate tasks with sequences of named subtasks, providing information about high-level structural relationships among tasks but not how to implement them\u2014specifically not providing the detailed guidance used by much previous work on learning policy abstractions for RL (e.g. intermediate rewards, subtask completion signals, or intrinsic motivations). To learn from sketches, we present a model that associates every subtask with a modular subpolicy, and jointly maximizes reward over full task-specific policies by tying parameters across shared subpolicies. Optimization is accomplished via a decoupled actor\u2013critic training objective that facilitates learning common behaviors from multiple dissimilar reward functions. We evaluate the effectiveness of our approach in three environments featuring both discrete and continuous control, and with sparse rewards that can be obtained only after completing a number of high-level subgoals. 
Experiments show that using our approach to learn policies guided by sketches gives better performance than existing techniques for learning task-specific or shared policies, while naturally inducing a library of interpretable primitive behaviors that can be recombined to rapidly adapt to new tasks.}\n}", "pdf": "http://proceedings.mlr.press/v70/andreas17a/andreas17a.pdf", "supp": "", "pdf_size": 2597200, "gs_citation": 602, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5436796240835430868&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "University of California, Berkeley; University of California, Berkeley; University of California, Berkeley", "aff_domain": "cs.berkeley.edu; ; ", "email": "cs.berkeley.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/andreas17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Multi-Class Optimal Margin Distribution Machine", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/823", "id": "823", "author_site": "Teng Zhang, Zhi-Hua Zhou", "author": "Teng Zhang; Zhi-Hua Zhou", "abstract": "Recent studies disclose that maximizing the minimum margin like support vector machines does not necessarily lead to better generalization performances, and instead, it is crucial to optimize the margin distribution. Although it has been shown that for binary classification, characterizing the margin distribution by the first- and second-order statistics can achieve superior performance. It still remains open for multi-class classification, and due to the complexity of margin for multi-class classification, optimizing its distribution by mean and variance can also be difficult. In this paper, we propose mcODM (multi-class Optimal margin Distribution Machine), which can solve this problem efficiently. We also give a theoretical analysis for our method, which verifies the significance of margin distribution for multi-class classification. Empirical study further shows that mcODM always outperforms all four versions of multi-class SVMs on all experimental data sets.", "bibtex": "@InProceedings{pmlr-v70-zhang17h,\n title = \t {Multi-Class Optimal Margin Distribution Machine},\n author = {Teng Zhang and Zhi-Hua Zhou},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {4063--4071},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zhang17h/zhang17h.pdf},\n url = \t {https://proceedings.mlr.press/v70/zhang17h.html},\n abstract = \t {Recent studies disclose that maximizing the minimum margin like support vector machines does not necessarily lead to better generalization performances, and instead, it is crucial to optimize the margin distribution. Although it has been shown that for binary classification, characterizing the margin distribution by the first- and second-order statistics can achieve superior performance. 
It still remains open for multi-class classification, and due to the complexity of margin for multi-class classification, optimizing its distribution by mean and variance can also be difficult. In this paper, we propose mcODM (multi-class Optimal margin Distribution Machine), which can solve this problem efficiently. We also give a theoretical analysis for our method, which verifies the significance of margin distribution for multi-class classification. Empirical study further shows that mcODM always outperforms all four versions of multi-class SVMs on all experimental data sets.}\n}", "pdf": "http://proceedings.mlr.press/v70/zhang17h/zhang17h.pdf", "supp": "", "pdf_size": 164911, "gs_citation": 37, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10102202494504886991&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China; National Key Laboratory for Novel Software Technology, Nanjing University, Nanjing 210023, China", "aff_domain": "lamda.nju.edu.cn;lamda.nju.edu.cn", "email": "lamda.nju.edu.cn;lamda.nju.edu.cn", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/zhang17h.html", "aff_unique_index": "0;0", "aff_unique_norm": "Nanjing University", "aff_unique_dep": "National Key Laboratory for Novel Software Technology", "aff_unique_url": "http://www.nju.edu.cn", "aff_unique_abbr": "Nanjing U", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Nanjing", "aff_country_unique_index": "0;0", "aff_country_unique": "China" }, { "title": "Multi-fidelity Bayesian Optimisation with Continuous Approximations", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/596", "id": "596", "author_site": "kirthevasan kandasamy, Gautam Dasarathy, Barnab\u00e1s P\u00f3czos, Jeff Schneider", "author": "Kirthevasan Kandasamy; Gautam Dasarathy; Jeff Schneider; Barnab\u00e1s P\u00f3czos", "abstract": "Bandit methods for black-box optimisation, such as Bayesian optimisation, are used in a variety of applications including hyper-parameter tuning and experiment design. Recently,", "bibtex": "@InProceedings{pmlr-v70-kandasamy17a,\n title = \t {Multi-fidelity {B}ayesian Optimisation with Continuous Approximations},\n author = {Kirthevasan Kandasamy and Gautam Dasarathy and Jeff Schneider and Barnab{\\'a}s P{\\'o}czos},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1799--1808},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/kandasamy17a/kandasamy17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/kandasamy17a.html},\n abstract = \t {Bandit methods for black-box optimisation, such as Bayesian optimisation, are used in a variety of applications including hyper-parameter tuning and experiment design. 
Recently,", "pdf": "http://proceedings.mlr.press/v70/kandasamy17a/kandasamy17a.pdf", "supp": "", "pdf_size": 619569, "gs_citation": 285, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6604508853343703066&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Carnegie Mellon University, Pittsburgh PA, USA; Rice University, Houston TX, USA; Carnegie Mellon University, Pittsburgh PA, USA; Carnegie Mellon University, Pittsburgh PA, USA", "aff_domain": "cmu.edu; ; ; ", "email": "cmu.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/kandasamy17a.html", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "Carnegie Mellon University;Rice University", "aff_unique_dep": ";", "aff_unique_url": "https://www.cmu.edu;https://www.rice.edu", "aff_unique_abbr": "CMU;Rice", "aff_campus_unique_index": "0;1;0;0", "aff_campus_unique": "Pittsburgh;Houston", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Multi-objective Bandits: Optimizing the Generalized Gini Index", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/677", "id": "677", "author_site": "Robert Busa-Fekete, Balazs Szorenyi, Paul Weng, Shie Mannor", "author": "R\u00f3bert Busa-Fekete; Bal\u00e1zs Sz\u00f6r\u00e9nyi; Paul Weng; Shie Mannor", "abstract": "We study the multi-armed bandit (MAB) problem where the agent receives a vectorial feedback that encodes many possibly competing objectives to be optimized. The goal of the agent is to find a policy, which can optimize these objectives simultaneously in a fair way. This multi-objective online optimization problem is formalized by using the Generalized Gini Index (GGI) aggregation function. We propose an online gradient descent algorithm which exploits the convexity of the GGI aggregation function, and controls the exploration in a careful way achieving a distribution-free regret $\\tilde{O}(T^{-1/2} )$ with high probability. We test our algorithm on synthetic data as well as on an electric battery control problem where the goal is to trade off the use of the different cells of a battery in order to balance their respective degradation rates.", "bibtex": "@InProceedings{pmlr-v70-busa-fekete17a,\n title = \t {Multi-objective Bandits: Optimizing the Generalized {G}ini Index},\n author = {R{\\'o}bert Busa-Fekete and Bal{\\'a}zs Sz{\\\"o}r{\\'e}nyi and Paul Weng and Shie Mannor},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {625--634},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/busa-fekete17a/busa-fekete17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/busa-fekete17a.html},\n abstract = \t {We study the multi-armed bandit (MAB) problem where the agent receives a vectorial feedback that encodes many possibly competing objectives to be optimized. The goal of the agent is to find a policy, which can optimize these objectives simultaneously in a fair way. This multi-objective online optimization problem is formalized by using the Generalized Gini Index (GGI) aggregation function. 
We propose an online gradient descent algorithm which exploits the convexity of the GGI aggregation function, and controls the exploration in a careful way achieving a distribution-free regret $\\tilde{O}(T^{-1/2} )$ with high probability. We test our algorithm on synthetic data as well as on an electric battery control problem where the goal is to trade off the use of the different cells of a battery in order to balance their respective degradation rates.}\n}", "pdf": "http://proceedings.mlr.press/v70/busa-fekete17a/busa-fekete17a.pdf", "supp": "", "pdf_size": 1074394, "gs_citation": 58, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8683752348819035689&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Yahoo Research, New York, NY, USA; Research Group on AI, Hungarian Acad. Sci. and Univ. of Szeged, Szeged, Hungary + Technion Institute of Technology, Haifa, Israel; SYSU-CMU JIE, SEIT, SYSU, Guangzhou, P.R. China + SYSU-CMU JRI, Shunde, P.R. China; Technion Institute of Technology, Haifa, Israel", "aff_domain": "weng.fr; ; ; ", "email": "weng.fr; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/busa-fekete17a.html", "aff_unique_index": "0;1+2;3+4;2", "aff_unique_norm": "Yahoo Research;Hungarian Academy of Sciences;Technion Institute of Technology;Sun Yat-sen University;Sun Yat-sen University - Carnegie Mellon University Joint Research Institute", "aff_unique_dep": ";Research Group on AI;;School of Electronics and Information Technology;", "aff_unique_url": "https://research.yahoo.com;https://www.mta.hu;https://www.technion.ac.il;http://www.sysu.edu.cn/;", "aff_unique_abbr": "Yahoo Res.;MTA;Technion;SYSU;SYSU-CMU JRI", "aff_campus_unique_index": "0;1+2;3+4;2", "aff_campus_unique": "New York;Szeged;Haifa;Guangzhou;Shunde", "aff_country_unique_index": "0;1+2;3+3;2", "aff_country_unique": "United States;Hungary;Israel;China" }, { "title": "Multi-task Learning with Labeled and Unlabeled Tasks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/690", "id": "690", "author_site": "Anastasia Pentina, Christoph H. Lampert", "author": "Anastasia Pentina; Christoph H. Lampert", "abstract": "In multi-task learning, a learner is given a collection of prediction tasks and needs to solve all of them. In contrast to previous work, which required that annotated training data must be available for all tasks, we consider a new setting, in which for some tasks, potentially most of them, only unlabeled training data is provided. Consequently, to solve all tasks, information must be transferred between tasks with labels and tasks without labels. Focusing on an instance-based transfer method we analyze two variants of this setting: when the set of labeled tasks is fixed, and when it can be actively selected by the learner. We state and prove a generalization bound that covers both scenarios and derive from it an algorithm for making the choice of labeled tasks (in the active case) and for transferring information between the tasks in a principled way. We also illustrate the effectiveness of the algorithm on synthetic and real data.", "bibtex": "@InProceedings{pmlr-v70-pentina17a,\n title = \t {Multi-task Learning with Labeled and Unlabeled Tasks},\n author = {Anastasia Pentina and Christoph H. 
Lampert},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2807--2816},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/pentina17a/pentina17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/pentina17a.html},\n abstract = \t {In multi-task learning, a learner is given a collection of prediction tasks and needs to solve all of them. In contrast to previous work, which required that annotated training data must be available for all tasks, we consider a new setting, in which for some tasks, potentially most of them, only unlabeled training data is provided. Consequently, to solve all tasks, information must be transferred between tasks with labels and tasks without labels. Focusing on an instance-based transfer method we analyze two variants of this setting: when the set of labeled tasks is fixed, and when it can be actively selected by the learner. We state and prove a generalization bound that covers both scenarios and derive from it an algorithm for making the choice of labeled tasks (in the active case) and for transferring information between the tasks in a principled way. We also illustrate the effectiveness of the algorithm on synthetic and real data.}\n}", "pdf": "http://proceedings.mlr.press/v70/pentina17a/pentina17a.pdf", "supp": "", "pdf_size": 1998807, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3874925799299102202&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "IST Austria; IST Austria", "aff_domain": "ist.ac.at; ", "email": "ist.ac.at; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/pentina17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Institute of Science and Technology Austria", "aff_unique_dep": "", "aff_unique_url": "https://www.ist.ac.at", "aff_unique_abbr": "IST Austria", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Austria" }, { "title": "Multichannel End-to-end Speech Recognition", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/734", "id": "734", "author_site": "Tsubasa Ochiai, Shinji Watanabe, Takaaki Hori, John Hershey", "author": "Tsubasa Ochiai; Shinji Watanabe; Takaaki Hori; John R. Hershey", "abstract": "The field of speech recognition is in the midst of a paradigm shift: end-to-end neural networks are challenging the dominance of hidden Markov models as a core technology. Using an attention mechanism in a recurrent encoder-decoder architecture solves the dynamic time alignment problem, allowing joint end-to-end training of the acoustic and language modeling components. In this paper we extend the end-to-end framework to encompass microphone array signal processing for noise suppression and speech enhancement within the acoustic encoding network. This allows the beamforming components to be optimized jointly within the recognition architecture to improve the end-to-end speech recognition objective. 
Experiments on the noisy speech benchmarks (CHiME-4 and AMI) show that our multichannel end-to-end system outperformed the attention-based baseline with input from a conventional adaptive beamformer.", "bibtex": "@InProceedings{pmlr-v70-ochiai17a,\n title = \t {Multichannel End-to-end Speech Recognition},\n author = {Tsubasa Ochiai and Shinji Watanabe and Takaaki Hori and John R. Hershey},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2632--2641},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/ochiai17a/ochiai17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/ochiai17a.html},\n abstract = \t {The field of speech recognition is in the midst of a paradigm shift: end-to-end neural networks are challenging the dominance of hidden Markov models as a core technology. Using an attention mechanism in a recurrent encoder-decoder architecture solves the dynamic time alignment problem, allowing joint end-to-end training of the acoustic and language modeling components. In this paper we extend the end-to-end framework to encompass microphone array signal processing for noise suppression and speech enhancement within the acoustic encoding network. This allows the beamforming components to be optimized jointly within the recognition architecture to improve the end-to-end speech recognition objective. Experiments on the noisy speech benchmarks (CHiME-4 and AMI) show that our multichannel end-to-end system outperformed the attention-based baseline with input from a conventional adaptive beamformer.}\n}", "pdf": "http://proceedings.mlr.press/v70/ochiai17a/ochiai17a.pdf", "supp": "", "pdf_size": 2423369, "gs_citation": 129, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17516980163425320168&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 13, "aff": "Doshisha University, Kyoto, Japan; Mitsubishi Electric Research Laboratories (MERL), Cambridge, MA, USA; Mitsubishi Electric Research Laboratories (MERL), Cambridge, MA, USA; Mitsubishi Electric Research Laboratories (MERL), Cambridge, MA, USA", "aff_domain": "mail4.doshisha.ac.jp;merl.com; ; ", "email": "mail4.doshisha.ac.jp;merl.com; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/ochiai17a.html", "aff_unique_index": "0;1;1;1", "aff_unique_norm": "Doshisha University;Mitsubishi Electric Research Laboratories", "aff_unique_dep": ";", "aff_unique_url": "https://www.doshisha.ac.jp;https://www.merl.com", "aff_unique_abbr": ";MERL", "aff_campus_unique_index": "0;1;1;1", "aff_campus_unique": "Kyoto;Cambridge", "aff_country_unique_index": "0;1;1;1", "aff_country_unique": "Japan;United States" }, { "title": "Multilabel Classification with Group Testing and Codes", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/713", "id": "713", "author_site": "Shashanka Ubaru, Arya Mazumdar", "author": "Shashanka Ubaru; Arya Mazumdar", "abstract": "In recent years, the multiclass and mutlilabel classification problems we encounter in many applications have very large ($10^3$\u2013$10^6$) number of classes. However, each instance belongs to only one or few classes, i.e., the label vectors are sparse. 
In this work, we propose a novel approach based on group testing to solve such large multilabel classification problems with sparse label vectors. We describe various group testing constructions, and advocate the use of concatenated Reed Solomon codes and unbalanced bipartite expander graphs for extreme classification problems. The proposed approach has several advantages theoretically and practically over existing popular methods. Our method operates on the binary alphabet and can utilize the well-established binary classifiers for learning. The error correction capabilities of the codes are leveraged for the first time in the learning problem to correct prediction errors. Even if a linearly growing number of classifiers mis-classify, these errors are fully corrected. We establish Hamming loss error bounds for the approach. More importantly, our method utilizes a simple prediction algorithm and does not require matrix inversion or solving optimization problems making the algorithm very inexpensive. Numerical experiments with various datasets illustrate the superior performance of our method.", "bibtex": "@InProceedings{pmlr-v70-ubaru17a,\n title = \t {Multilabel Classification with Group Testing and Codes},\n author = {Shashanka Ubaru and Arya Mazumdar},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3492--3501},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/ubaru17a/ubaru17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/ubaru17a.html},\n abstract = \t {In recent years, the multiclass and multilabel classification problems we encounter in many applications have very large ($10^3$\u2013$10^6$) number of classes. However, each instance belongs to only one or few classes, i.e., the label vectors are sparse. In this work, we propose a novel approach based on group testing to solve such large multilabel classification problems with sparse label vectors. We describe various group testing constructions, and advocate the use of concatenated Reed Solomon codes and unbalanced bipartite expander graphs for extreme classification problems. The proposed approach has several advantages theoretically and practically over existing popular methods. Our method operates on the binary alphabet and can utilize the well-established binary classifiers for learning. The error correction capabilities of the codes are leveraged for the first time in the learning problem to correct prediction errors. Even if a linearly growing number of classifiers mis-classify, these errors are fully corrected. We establish Hamming loss error bounds for the approach. More importantly, our method utilizes a simple prediction algorithm and does not require matrix inversion or solving optimization problems making the algorithm very inexpensive. 
Numerical experiments with various datasets illustrate the superior performance of our method.}\n}", "pdf": "http://proceedings.mlr.press/v70/ubaru17a/ubaru17a.pdf", "supp": "", "pdf_size": 578580, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5529974668505263489&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science and Engineering, University of Minnesota at Twin Cities, MN USA; College of Information and Computer Sciences, University of Massachusetts Amherst, Amherst, MA, USA", "aff_domain": "umn.edu; ", "email": "umn.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/ubaru17a.html", "aff_unique_index": "0;1", "aff_unique_norm": "University of Minnesota;University of Massachusetts Amherst", "aff_unique_dep": "Department of Computer Science and Engineering;College of Information and Computer Sciences", "aff_unique_url": "https://www.minnesota.edu;https://www.umass.edu", "aff_unique_abbr": "UMN;UMass Amherst", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Twin Cities;Amherst", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Multilevel Clustering via Wasserstein Means", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/788", "id": "788", "author_site": "Nhat Ho, XuanLong Nguyen, Mikhail Yurochkin, Hung Bui, Viet Huynh, Dinh Phung", "author": "Nhat Ho; XuanLong Nguyen; Mikhail Yurochkin; Hung Hai Bui; Viet Huynh; Dinh Phung", "abstract": "We propose a novel approach to the problem of multilevel clustering, which aims to simultaneously partition data in each group and discover grouping patterns among groups in a potentially large hierarchically structured corpus of data. Our method involves a joint optimization formulation over several spaces of discrete probability measures, which are endowed with Wasserstein distance metrics. We propose a number of variants of this problem, which admit fast optimization algorithms, by exploiting the connection to the problem of finding Wasserstein barycenters. Consistency properties are established for the estimates of both local and global clusters. Finally, experiment results with both synthetic and real data are presented to demonstrate the flexibility and scalability of the proposed approach.", "bibtex": "@InProceedings{pmlr-v70-ho17a,\n title = \t {Multilevel Clustering via {W}asserstein Means},\n author = {Nhat Ho and XuanLong Nguyen and Mikhail Yurochkin and Hung Hai Bui and Viet Huynh and Dinh Phung},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1501--1509},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/ho17a/ho17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/ho17a.html},\n abstract = \t {We propose a novel approach to the problem of multilevel clustering, which aims to simultaneously partition data in each group and discover grouping patterns among groups in a potentially large hierarchically structured corpus of data. Our method involves a joint optimization formulation over several spaces of discrete probability measures, which are endowed with Wasserstein distance metrics. 
We propose a number of variants of this problem, which admit fast optimization algorithms, by exploiting the connection to the problem of finding Wasserstein barycenters. Consistency properties are established for the estimates of both local and global clusters. Finally, experiment results with both synthetic and real data are presented to demonstrate the flexibility and scalability of the proposed approach.}\n}", "pdf": "http://proceedings.mlr.press/v70/ho17a/ho17a.pdf", "supp": "", "pdf_size": 1281834, "gs_citation": 175, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8572512493183226380&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Department of Statistics, University of Michigan, USA; Department of Statistics, University of Michigan, USA; Department of Statistics, University of Michigan, USA; Adobe Research; Center for Pattern Recognition and Data Analytics (PRaDA), Deakin University, Australia; Center for Pattern Recognition and Data Analytics (PRaDA), Deakin University, Australia", "aff_domain": "umich.edu; ; ; ; ; ", "email": "umich.edu; ; ; ; ; ", "github": "https://github.com/moonfolk/Multilevel-Wasserstein-Means", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/ho17a.html", "aff_unique_index": "0;0;0;1;2;2", "aff_unique_norm": "University of Michigan;Adobe;Deakin University", "aff_unique_dep": "Department of Statistics;Adobe Research;Center for Pattern Recognition and Data Analytics (PRaDA)", "aff_unique_url": "https://www.umich.edu;https://research.adobe.com;https://www.deakin.edu.au", "aff_unique_abbr": "UM;Adobe;Deakin", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;1;1", "aff_country_unique": "United States;Australia" }, { "title": "Multiple Clustering Views from Multiple Uncertain Experts", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/484", "id": "484", "author_site": "Yale Chang, Junxiang Chen, Michael Cho, Peter Castaldi, Edwin Silverman, Jennifer Dy", "author": "Yale Chang; Junxiang Chen; Michael H. Cho; Peter J. Castaldi; Edwin K. Silverman; Jennifer G. Dy", "abstract": "Expert input can improve clustering performance. In today\u2019s collaborative environment, the availability of crowdsourced multiple expert input is becoming common. Given multiple experts\u2019 inputs, most existing approaches can only discover one clustering structure. However, data is multi-faced by nature and can be clustered in different ways (also known as views). In an exploratory analysis problem where ground truth is not known, different experts may have diverse views on how to cluster data. In this paper, we address the problem on how to automatically discover multiple ways to cluster data given potentially diverse inputs from multiple uncertain experts. We propose a novel Bayesian probabilistic model that automatically learns the multiple expert views and the clustering structure associated with each view. The benefits of learning the experts\u2019 views include 1) enabling the discovery of multiple diverse clustering structures, and 2) improving the quality of clustering solution in each view by assigning higher weights to experts with higher confidence. In our approach, the expert views, multiple clustering structures and expert confidences are jointly learned via variational inference. 
Experimental results on synthetic datasets, benchmark datasets and a real-world disease subtyping problem show that our proposed approach outperforms competing baselines, including meta clustering, semi-supervised clustering, semi-crowdsourced clustering and consensus clustering.", "bibtex": "@InProceedings{pmlr-v70-chang17a,\n title = \t {Multiple Clustering Views from Multiple Uncertain Experts},\n author = {Yale Chang and Junxiang Chen and Michael H. Cho and Peter J. Castaldi and Edwin K. Silverman and Jennifer G. Dy},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {674--683},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/chang17a/chang17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/chang17a.html},\n abstract = \t {Expert input can improve clustering performance. In today\u2019s collaborative environment, the availability of crowdsourced multiple expert input is becoming common. Given multiple experts\u2019 inputs, most existing approaches can only discover one clustering structure. However, data is multi-faceted by nature and can be clustered in different ways (also known as views). In an exploratory analysis problem where ground truth is not known, different experts may have diverse views on how to cluster data. In this paper, we address the problem of how to automatically discover multiple ways to cluster data given potentially diverse inputs from multiple uncertain experts. We propose a novel Bayesian probabilistic model that automatically learns the multiple expert views and the clustering structure associated with each view. The benefits of learning the experts\u2019 views include 1) enabling the discovery of multiple diverse clustering structures, and 2) improving the quality of clustering solution in each view by assigning higher weights to experts with higher confidence. In our approach, the expert views, multiple clustering structures and expert confidences are jointly learned via variational inference. 
Experimental results on synthetic datasets, benchmark datasets and a real-world disease subtyping problem show that our proposed approach outperforms competing baselines, including meta clustering, semi-supervised clustering, semi-crowdsourced clustering and consensus clustering.}\n}", "pdf": "http://proceedings.mlr.press/v70/chang17a/chang17a.pdf", "supp": "", "pdf_size": 594421, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11718233330909447788&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Northeastern University, Boston, MA; Northeastern University, Boston, MA; Brigham and Women\u2019s Hospital, Harvard Medical School, Boston, MA; Brigham and Women\u2019s Hospital, Harvard Medical School, Boston, MA; Brigham and Women\u2019s Hospital, Harvard Medical School, Boston, MA; Northeastern University, Boston, MA", "aff_domain": "coe.neu.edu; ; ; ; ; ", "email": "coe.neu.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/chang17a.html", "aff_unique_index": "0;0;1;1;1;0", "aff_unique_norm": "Northeastern University;Brigham and Women\u2019s Hospital", "aff_unique_dep": ";Harvard Medical School", "aff_unique_url": "https://www.northeastern.edu;https://www.brighamandwomens.org", "aff_unique_abbr": "NEU;BWH", "aff_campus_unique_index": "0;0;0;0;0;0", "aff_campus_unique": "Boston", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Multiplicative Normalizing Flows for Variational Bayesian Neural Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/575", "id": "575", "author_site": "Christos Louizos, Max Welling", "author": "Christos Louizos; Max Welling", "abstract": "We reinterpret multiplicative noise in neural networks as auxiliary random variables that augment the approximate posterior in a variational setting for Bayesian neural networks. We show that through this interpretation it is both efficient and straightforward to improve the approximation by employing normalizing flows while still allowing for local reparametrizations and a tractable lower bound. In experiments we show that with this new approximation we can significantly improve upon classical mean field for Bayesian neural networks on both predictive accuracy as well as predictive uncertainty.", "bibtex": "@InProceedings{pmlr-v70-louizos17a,\n title = \t {Multiplicative Normalizing Flows for Variational {B}ayesian Neural Networks},\n author = {Christos Louizos and Max Welling},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2218--2227},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/louizos17a/louizos17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/louizos17a.html},\n abstract = \t {We reinterpret multiplicative noise in neural networks as auxiliary random variables that augment the approximate posterior in a variational setting for Bayesian neural networks. We show that through this interpretation it is both efficient and straightforward to improve the approximation by employing normalizing flows while still allowing for local reparametrizations and a tractable lower bound. 
In experiments we show that with this new approximation we can significantly improve upon classical mean field for Bayesian neural networks on both predictive accuracy as well as predictive uncertainty.}\n}", "pdf": "http://proceedings.mlr.press/v70/louizos17a/louizos17a.pdf", "supp": "", "pdf_size": 1136217, "gs_citation": 593, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1022009274958609654&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "University of Amsterdam, Netherlands+TNO Intelligent Imaging, Netherlands; University of Amsterdam, Netherlands+Canadian Institute For Advanced Research (CIFAR)", "aff_domain": "uva.nl; ", "email": "uva.nl; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/louizos17a.html", "aff_unique_index": "0+1;0+2", "aff_unique_norm": "University of Amsterdam;TNO;Canadian Institute for Advanced Research", "aff_unique_dep": ";Intelligent Imaging;", "aff_unique_url": "https://www.uva.nl;https://www.tno.nl;https://www.cifar.ca", "aff_unique_abbr": "UvA;TNO;CIFAR", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0+1", "aff_country_unique": "Netherlands;Canada" }, { "title": "Natasha: Faster Non-Convex Stochastic Optimization via Strongly Non-Convex Parameter", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/594", "id": "594", "author": "Zeyuan Allen-Zhu", "abstract": "Given a non-convex function $f(x)$ that is an average of $n$ smooth functions, we design stochastic first-order methods to find its approximate stationary points. The performance of our new methods depend on the smallest (negative) eigenvalue $-\\sigma$ of the Hessian. This parameter $\\sigma$ captures how strongly non-convex $f(x)$ is, and is analogous to the strong convexity parameter for convex optimization. At least in theory, our methods outperform known results for a range of parameter $\\sigma$, and can also be used to find approximate local minima. Our result implies an interesting dichotomy: there exists a threshold $\\sigma_0$ so that the (currently) fastest methods for $\\sigma>\\sigma_0$ and for $\\sigma<\\sigma_0$ have different behaviors: the former scales with $n^{2/3}$ and the latter scales with $n^{3/4}$.", "bibtex": "@InProceedings{pmlr-v70-allen-zhu17a,\n title = \t {Natasha: Faster Non-Convex Stochastic Optimization via Strongly Non-Convex Parameter},\n author = {Zeyuan Allen-Zhu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {89--97},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/allen-zhu17a/allen-zhu17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/allen-zhu17a.html},\n abstract = \t {Given a non-convex function $f(x)$ that is an average of $n$ smooth functions, we design stochastic first-order methods to find its approximate stationary points. The performance of our new methods depend on the smallest (negative) eigenvalue $-\\sigma$ of the Hessian. This parameter $\\sigma$ captures how strongly non-convex $f(x)$ is, and is analogous to the strong convexity parameter for convex optimization. At least in theory, our methods outperform known results for a range of parameter $\\sigma$, and can also be used to find approximate local minima. 
Our result implies an interesting dichotomy: there exists a threshold $\\sigma_0$ so that the (currently) fastest methods for $\\sigma>\\sigma_0$ and for $\\sigma<\\sigma_0$ have different behaviors: the former scales with $n^{2/3}$ and the latter scales with $n^{3/4}$.}\n}", "pdf": "http://proceedings.mlr.press/v70/allen-zhu17a/allen-zhu17a.pdf", "supp": "", "pdf_size": 985659, "gs_citation": 102, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16148931541092203520&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Microsoft Research", "aff_domain": "csail.mit.edu", "email": "csail.mit.edu", "github": "", "project": "http://arxiv.org/abs/1702.00763", "author_num": 1, "oa": "https://proceedings.mlr.press/v70/allen-zhu17a.html", "aff_unique_index": "0", "aff_unique_norm": "Microsoft", "aff_unique_dep": "Microsoft Research", "aff_unique_url": "https://www.microsoft.com/en-us/research", "aff_unique_abbr": "MSR", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "title": "Near-Optimal Design of Experiments via Regret Minimization", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/609", "id": "609", "author_site": "Zeyuan Allen-Zhu, Yuanzhi Li, Aarti Singh, Yining Wang", "author": "Zeyuan Allen-Zhu; Yuanzhi Li; Aarti Singh; Yining Wang", "abstract": "We consider computationally tractable methods for the experimental design problem, where k out of n design points of dimension p are selected so that certain optimality criteria are approximately satisfied. Our algorithm finds a $(1+\\epsilon)$-approximate optimal design when k is a linear function of p; in contrast, existing results require k to be super-linear in p. Our algorithm also handles all popular optimality criteria, while existing ones only handle one or two such criteria. Numerical results on synthetic and real-world design problems verify the practical effectiveness of the proposed algorithm.", "bibtex": "@InProceedings{pmlr-v70-allen-zhu17e,\n title = \t {Near-Optimal Design of Experiments via Regret Minimization},\n author = {Zeyuan Allen-Zhu and Yuanzhi Li and Aarti Singh and Yining Wang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {126--135},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/allen-zhu17e/allen-zhu17e.pdf},\n url = \t {https://proceedings.mlr.press/v70/allen-zhu17e.html},\n abstract = \t {We consider computationally tractable methods for the experimental design problem, where k out of n design points of dimension p are selected so that certain optimality criteria are approximately satisfied. Our algorithm finds a $(1+\\epsilon)$-approximate optimal design when k is a linear function of p; in contrast, existing results require k to be super-linear in p. Our algorithm also handles all popular optimality criteria, while existing ones only handle one or two such criteria. 
Numerical results on synthetic and real-world design problems verify the practical effectiveness of the proposed algorithm.}\n}", "pdf": "http://proceedings.mlr.press/v70/allen-zhu17e/allen-zhu17e.pdf", "supp": "", "pdf_size": 416572, "gs_citation": 77, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6264033442120298625&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Microsoft Research, Redmond, USA; Princeton University, Princeton, USA; Carnegie Mellon University, Pittsburgh, USA; Carnegie Mellon University, Pittsburgh, USA", "aff_domain": "cs.cmu.edu; ; ;cs.cmu.edu", "email": "cs.cmu.edu; ; ;cs.cmu.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/allen-zhu17e.html", "aff_unique_index": "0;1;2;2", "aff_unique_norm": "Microsoft;Princeton University;Carnegie Mellon University", "aff_unique_dep": "Microsoft Research;;", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://www.princeton.edu;https://www.cmu.edu", "aff_unique_abbr": "MSR;Princeton;CMU", "aff_campus_unique_index": "0;1;2;2", "aff_campus_unique": "Redmond;Princeton;Pittsburgh", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Nearly Optimal Robust Matrix Completion", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/772", "id": "772", "author_site": "Yeshwanth Cherapanamjeri, Prateek Jain, Kartik Gupta", "author": "Yeshwanth Cherapanamjeri; Kartik Gupta; Prateek Jain", "abstract": "In this paper, we consider the problem of Robust Matrix Completion (RMC) where the goal is to recover a low-rank matrix by observing a small number of its entries out of which a few can be arbitrarily corrupted. We propose a simple projected gradient descent-based method to estimate the low-rank matrix that alternately performs a projected gradient descent step and cleans up a few of the corrupted entries using hard-thresholding. Our algorithm solves RMC using nearly optimal number of observations while tolerating a nearly optimal number of corruptions. Our result also implies significant improvement over the existing time complexity bounds for the low-rank matrix completion problem. Finally, an application of our result to the robust PCA problem (low-rank+sparse matrix separation) leads to nearly linear time (in matrix dimensions) algorithm for the same; existing state-of-the-art methods require quadratic time. Our empirical results corroborate our theoretical results and show that even for moderate sized problems, our method for robust PCA is an order of magnitude faster than the existing methods.", "bibtex": "@InProceedings{pmlr-v70-cherapanamjeri17a,\n title = \t {Nearly Optimal Robust Matrix Completion},\n author = {Yeshwanth Cherapanamjeri and Kartik Gupta and Prateek Jain},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {797--805},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/cherapanamjeri17a/cherapanamjeri17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/cherapanamjeri17a.html},\n abstract = \t {In this paper, we consider the problem of Robust Matrix Completion (RMC) where the goal is to recover a low-rank matrix by observing a small number of its entries out of which a few can be arbitrarily corrupted. 
We propose a simple projected gradient descent-based method to estimate the low-rank matrix that alternately performs a projected gradient descent step and cleans up a few of the corrupted entries using hard-thresholding. Our algorithm solves RMC using nearly optimal number of observations while tolerating a nearly optimal number of corruptions. Our result also implies significant improvement over the existing time complexity bounds for the low-rank matrix completion problem. Finally, an application of our result to the robust PCA problem (low-rank+sparse matrix separation) leads to nearly linear time (in matrix dimensions) algorithm for the same; existing state-of-the-art methods require quadratic time. Our empirical results corroborate our theoretical results and show that even for moderate sized problems, our method for robust PCA is an order of magnitude faster than the existing methods.}\n}", "pdf": "http://proceedings.mlr.press/v70/cherapanamjeri17a/cherapanamjeri17a.pdf", "supp": "", "pdf_size": 461974, "gs_citation": 124, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17633026662909116183&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Microsoft Research India; Microsoft Research India; Microsoft Research India", "aff_domain": "microsoft.com;microsoft.com;microsoft.com", "email": "microsoft.com;microsoft.com;microsoft.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/cherapanamjeri17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Microsoft", "aff_unique_dep": "Microsoft Research India", "aff_unique_url": "https://www.microsoft.com/en-us/research/group/microsoft-research-india", "aff_unique_abbr": "MSR India", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "India" }, { "title": "Neural Audio Synthesis of Musical Notes with WaveNet Autoencoders", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/868", "id": "868", "author_site": "Cinjon Resnick, Adam Roberts, Jesse Engel, Douglas Eck, Sander Dieleman, Karen Simonyan, Mohammad Norouzi", "author": "Jesse Engel; Cinjon Resnick; Adam Roberts; Sander Dieleman; Mohammad Norouzi; Douglas Eck; Karen Simonyan", "abstract": "Generative models in vision have seen rapid progress due to algorithmic improvements and the availability of high-quality image datasets. In this paper, we offer contributions in both these areas to enable similar progress in audio modeling. First, we detail a powerful new WaveNet-style autoencoder model that conditions an autoregressive decoder on temporal codes learned from the raw audio waveform. Second, we introduce NSynth, a large-scale and high-quality dataset of musical notes that is an order of magnitude larger than comparable public datasets. Using NSynth, we demonstrate improved qualitative and quantitative performance of the WaveNet autoencoder over a well-tuned spectral autoencoder baseline. 
Finally, we show that the model learns a manifold of embeddings that allows for morphing between instruments, meaningfully interpolating in timbre to create new types of sounds that are realistic and expressive.", "bibtex": "@InProceedings{pmlr-v70-engel17a,\n title = \t {Neural Audio Synthesis of Musical Notes with {W}ave{N}et Autoencoders},\n author = {Jesse Engel and Cinjon Resnick and Adam Roberts and Sander Dieleman and Mohammad Norouzi and Douglas Eck and Karen Simonyan},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1068--1077},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/engel17a/engel17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/engel17a.html},\n abstract = \t {Generative models in vision have seen rapid progress due to algorithmic improvements and the availability of high-quality image datasets. In this paper, we offer contributions in both these areas to enable similar progress in audio modeling. First, we detail a powerful new WaveNet-style autoencoder model that conditions an autoregressive decoder on temporal codes learned from the raw audio waveform. Second, we introduce NSynth, a large-scale and high-quality dataset of musical notes that is an order of magnitude larger than comparable public datasets. Using NSynth, we demonstrate improved qualitative and quantitative performance of the WaveNet autoencoder over a well-tuned spectral autoencoder baseline. Finally, we show that the model learns a manifold of embeddings that allows for morphing between instruments, meaningfully interpolating in timbre to create new types of sounds that are realistic and expressive.}\n}", "pdf": "http://proceedings.mlr.press/v70/engel17a/engel17a.pdf", "supp": "", "pdf_size": 3792940, "gs_citation": 827, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5597311454772920979&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Google Brain; Google Brain; Google Brain; DeepMind; Google Brain; Google Brain; DeepMind", "aff_domain": "google.com; ; ; ; ; ; ", "email": "google.com; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v70/engel17a.html", "aff_unique_index": "0;0;0;1;0;0;1", "aff_unique_norm": "Google;DeepMind", "aff_unique_dep": "Google Brain;", "aff_unique_url": "https://brain.google.com;https://deepmind.com", "aff_unique_abbr": "Google Brain;DeepMind", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Mountain View;", "aff_country_unique_index": "0;0;0;1;0;0;1", "aff_country_unique": "United States;United Kingdom" }, { "title": "Neural Episodic Control", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/745", "id": "745", "author_site": "Alexander Pritzel, Benigno Uria, Srinivasan Sriram, Adri\u00e0 Puigdomenech Badia, Oriol Vinyals, Demis Hassabis, Daan Wierstra, Charles Blundell", "author": "Alexander Pritzel; Benigno Uria; Sriram Srinivasan; Adri\u00e0 Puigdom\u00e8nech Badia; Oriol Vinyals; Demis Hassabis; Daan Wierstra; Charles Blundell", "abstract": "Deep reinforcement learning methods attain super-human performance in a wide range of environments. Such methods are grossly inefficient, often taking orders of magnitudes more data than humans to achieve reasonable performance. 
We propose Neural Episodic Control: a deep reinforcement learning agent that is able to rapidly assimilate new experiences and act upon them. Our agent uses a semi-tabular representation of the value function: a buffer of past experience containing slowly changing state representations and rapidly updated estimates of the value function. We show across a wide range of environments that our agent learns significantly faster than other state-of-the-art, general purpose deep reinforcement learning agents.", "bibtex": "@InProceedings{pmlr-v70-pritzel17a,\n title = \t {Neural Episodic Control},\n author = {Alexander Pritzel and Benigno Uria and Sriram Srinivasan and Adri{\\`a} Puigdom{\\`e}nech Badia and Oriol Vinyals and Demis Hassabis and Daan Wierstra and Charles Blundell},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2827--2836},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/pritzel17a/pritzel17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/pritzel17a.html},\n abstract = \t {Deep reinforcement learning methods attain super-human performance in a wide range of environments. Such methods are grossly inefficient, often taking orders of magnitudes more data than humans to achieve reasonable performance. We propose Neural Episodic Control: a deep reinforcement learning agent that is able to rapidly assimilate new experiences and act upon them. Our agent uses a semi-tabular representation of the value function: a buffer of past experience containing slowly changing state representations and rapidly updated estimates of the value function. We show across a wide range of environments that our agent learns significantly faster than other state-of-the-art, general purpose deep reinforcement learning agents.}\n}", "pdf": "http://proceedings.mlr.press/v70/pritzel17a/pritzel17a.pdf", "supp": "", "pdf_size": 2638870, "gs_citation": 448, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11072220347720997469&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Deepmind, London, UK; Deepmind, London, UK; Deepmind, London, UK; Deepmind, London, UK; Deepmind, London, UK; Deepmind, London, UK; Deepmind, London, UK; Deepmind, London, UK", "aff_domain": "google.com; ; ; ; ; ; ; ", "email": "google.com; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v70/pritzel17a.html", "aff_unique_index": "0;0;0;0;0;0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "0;0;0;0;0;0;0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0;0;0;0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Neural Message Passing for Quantum Chemistry", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/529", "id": "529", "author_site": "Justin Gilmer, Samuel Schoenholz, Patrick F Riley, Oriol Vinyals, George Dahl", "author": "Justin Gilmer; Samuel S. Schoenholz; Patrick F. Riley; Oriol Vinyals; George E. Dahl", "abstract": "Supervised learning on molecules has incredible potential to be useful in chemistry, drug discovery, and materials science. 
Luckily, several promising and closely related neural network models invariant to molecular symmetries have already been described in the literature. These models learn a message passing algorithm and aggregation procedure to compute a function of their entire input graph. At this point, the next step is to find a particularly effective variant of this general approach and apply it to chemical prediction benchmarks until we either solve them or reach the limits of the approach. In this paper, we reformulate existing models into a single common framework we call Message Passing Neural Networks (MPNNs) and explore additional novel variations within this framework. Using MPNNs we demonstrate state of the art results on an important molecular property prediction benchmark; these results are strong enough that we believe future work should focus on datasets with larger molecules or more accurate ground truth labels.", "bibtex": "@InProceedings{pmlr-v70-gilmer17a,\n title = \t {Neural Message Passing for Quantum Chemistry},\n author = {Justin Gilmer and Samuel S. Schoenholz and Patrick F. Riley and Oriol Vinyals and George E. Dahl},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1263--1272},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/gilmer17a/gilmer17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/gilmer17a.html},\n abstract = \t {Supervised learning on molecules has incredible potential to be useful in chemistry, drug discovery, and materials science. Luckily, several promising and closely related neural network models invariant to molecular symmetries have already been described in the literature. These models learn a message passing algorithm and aggregation procedure to compute a function of their entire input graph. At this point, the next step is to find a particularly effective variant of this general approach and apply it to chemical prediction benchmarks until we either solve them or reach the limits of the approach. In this paper, we reformulate existing models into a single common framework we call Message Passing Neural Networks (MPNNs) and explore additional novel variations within this framework. 
Using MPNNs we demonstrate state of the art results on an important molecular property prediction benchmark; these results are strong enough that we believe future work should focus on datasets with larger molecules or more accurate ground truth labels.}\n}", "pdf": "http://proceedings.mlr.press/v70/gilmer17a/gilmer17a.pdf", "supp": "", "pdf_size": 409424, "gs_citation": 10347, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6135306581977403485&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Google Brain; Google Brain; Google; Google DeepMind; Google Brain", "aff_domain": "google.com; ; ; ;google.com", "email": "google.com; ; ; ;google.com", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/gilmer17a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Brain", "aff_unique_url": "https://brain.google.com", "aff_unique_abbr": "Google Brain", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Mountain View;", "aff_country_unique_index": "0;0;0;1;0", "aff_country_unique": "United States;United Kingdom" }, { "title": "Neural Networks and Rational Functions", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/761", "id": "761", "author": "Matus Telgarsky", "abstract": "Neural networks and rational functions efficiently approximate each other. In more detail, it is shown here that for any ReLU network, there exists a rational function of degree $O(polylog(1/\\epsilon))$ which is $\\epsilon$-close, and similarly for any rational function there exists a ReLU network of size $O(polylog(1/\\epsilon))$ which is $\\epsilon$-close. By contrast, polynomials need degree $\\Omega(poly(1/\\epsilon))$ to approximate even a single ReLU. When converting a ReLU network to a rational function as above, the hidden constants depend exponentially on the number of layers, which is shown to be tight; in other words, a compositional representation can be beneficial even for rational functions.", "bibtex": "@InProceedings{pmlr-v70-telgarsky17a,\n title = \t {Neural Networks and Rational Functions},\n author = {Matus Telgarsky},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3387--3393},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/telgarsky17a/telgarsky17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/telgarsky17a.html},\n abstract = \t {Neural networks and rational functions efficiently approximate each other. In more detail, it is shown here that for any ReLU network, there exists a rational function of degree $O(polylog(1/\\epsilon))$ which is $\\epsilon$-close, and similarly for any rational function there exists a ReLU network of size $O(polylog(1/\\epsilon))$ which is $\\epsilon$-close. By contrast, polynomials need degree $\\Omega(poly(1/\\epsilon))$ to approximate even a single ReLU. 
When converting a ReLU network to a rational function as above, the hidden constants depend exponentially on the number of layers, which is shown to be tight; in other words, a compositional representation can be beneficial even for rational functions.}\n}", "pdf": "http://proceedings.mlr.press/v70/telgarsky17a/telgarsky17a.pdf", "supp": "", "pdf_size": 1030841, "gs_citation": 111, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5402236297069824698&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "", "aff_domain": "", "email": "", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v70/telgarsky17a.html" }, { "title": "Neural Optimizer Search with Reinforcement Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/842", "id": "842", "author_site": "Irwan Bello, Barret Zoph, Vijay Vasudevan, Quoc Le", "author": "Irwan Bello; Barret Zoph; Vijay Vasudevan; Quoc V. Le", "abstract": "We present an approach to automate the process of discovering optimization methods, with a focus on deep learning architectures. We train a Recurrent Neural Network controller to generate a string in a specific domain language that describes a mathematical update equation based on a list of primitive functions, such as the gradient, running average of the gradient, etc. The controller is trained with Reinforcement Learning to maximize the performance of a model after a few epochs. On CIFAR-10, our method discovers several update rules that are better than many commonly used optimizers, such as Adam, RMSProp, or SGD with and without Momentum on a ConvNet model. These optimizers can also be transferred to perform well on different neural network architectures, including Google\u2019s neural machine translation system.", "bibtex": "@InProceedings{pmlr-v70-bello17a,\n title = \t {Neural Optimizer Search with Reinforcement Learning},\n author = {Irwan Bello and Barret Zoph and Vijay Vasudevan and Quoc V. Le},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {459--468},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/bello17a/bello17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/bello17a.html},\n abstract = \t {We present an approach to automate the process of discovering optimization methods, with a focus on deep learning architectures. We train a Recurrent Neural Network controller to generate a string in a specific domain language that describes a mathematical update equation based on a list of primitive functions, such as the gradient, running average of the gradient, etc. The controller is trained with Reinforcement Learning to maximize the performance of a model after a few epochs. On CIFAR-10, our method discovers several update rules that are better than many commonly used optimizers, such as Adam, RMSProp, or SGD with and without Momentum on a ConvNet model. 
These optimizers can also be transferred to perform well on different neural network architectures, including Google\u2019s neural machine translation system.}\n}", "pdf": "http://proceedings.mlr.press/v70/bello17a/bello17a.pdf", "supp": "", "pdf_size": 1256965, "gs_citation": 482, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6173568198016594588&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Google Brain; Google Brain; Google Brain; Google Brain", "aff_domain": "google.com;google.com;google.com;google.com", "email": "google.com;google.com;google.com;google.com", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/bello17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Brain", "aff_unique_url": "https://brain.google.com", "aff_unique_abbr": "Google Brain", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Neural Taylor Approximations: Convergence and Exploration in Rectifier Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/600", "id": "600", "author_site": "David Balduzzi, Brian McWilliams, Tony Butler-Yeoman", "author": "David Balduzzi; Brian McWilliams; Tony Butler-Yeoman", "abstract": "Modern convolutional networks, incorporating rectifiers and max-pooling, are neither smooth nor convex; standard guarantees therefore do not apply. Nevertheless, methods from convex optimization such as gradient descent and Adam are widely used as building blocks for deep learning algorithms. This paper provides the first convergence guarantee applicable to modern convnets, which furthermore matches a lower bound for convex nonsmooth functions. The key technical tool is the neural Taylor approximation \u2013 a straightforward application of Taylor expansions to neural networks \u2013 and the associated Taylor loss. Experiments on a range of optimizers, layers, and tasks provide evidence that the analysis accurately captures the dynamics of neural optimization. The second half of the paper applies the Taylor approximation to isolate the main difficulty in training rectifier nets \u2013 that gradients are shattered \u2013 and investigates the hypothesis that, by exploring the space of activation configurations more thoroughly, adaptive optimizers such as RMSProp and Adam are able to converge to better solutions.", "bibtex": "@InProceedings{pmlr-v70-balduzzi17c,\n title = \t {Neural Taylor Approximations: Convergence and Exploration in Rectifier Networks},\n author = {David Balduzzi and Brian McWilliams and Tony Butler-Yeoman},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {351--360},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/balduzzi17c/balduzzi17c.pdf},\n url = \t {https://proceedings.mlr.press/v70/balduzzi17c.html},\n abstract = \t {Modern convolutional networks, incorporating rectifiers and max-pooling, are neither smooth nor convex; standard guarantees therefore do not apply. Nevertheless, methods from convex optimization such as gradient descent and Adam are widely used as building blocks for deep learning algorithms. 
This paper provides the first convergence guarantee applicable to modern convnets, which furthermore matches a lower bound for convex nonsmooth functions. The key technical tool is the neural Taylor approximation \u2013 a straightforward application of Taylor expansions to neural networks \u2013 and the associated Taylor loss. Experiments on a range of optimizers, layers, and tasks provide evidence that the analysis accurately captures the dynamics of neural optimization. The second half of the paper applies the Taylor approximation to isolate the main difficulty in training rectifier nets \u2013 that gradients are shattered \u2013 and investigates the hypothesis that, by exploring the space of activation configurations more thoroughly, adaptive optimizers such as RMSProp and Adam are able to converge to better solutions.}\n}", "pdf": "http://proceedings.mlr.press/v70/balduzzi17c/balduzzi17c.pdf", "supp": "", "pdf_size": 1568211, "gs_citation": 41, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15651118293090411832&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Victoria University of Wellington, New Zealand; Disney Research, Z\u00fcrich, Switzerland; Victoria University of Wellington, New Zealand", "aff_domain": "gmail.com; ; ", "email": "gmail.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/balduzzi17c.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Victoria University of Wellington;Disney Research", "aff_unique_dep": ";", "aff_unique_url": "https://www.victoria.ac.nz;https://research.disney.com", "aff_unique_abbr": "VUW;Disney Research", "aff_campus_unique_index": "1", "aff_campus_unique": ";Z\u00fcrich", "aff_country_unique_index": "0;1;0", "aff_country_unique": "New Zealand;Switzerland" }, { "title": "No Spurious Local Minima in Nonconvex Low Rank Problems: A Unified Geometric Analysis", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/777", "id": "777", "author_site": "Rong Ge, Chi Jin, Yi Zheng", "author": "Rong Ge; Chi Jin; Yi Zheng", "abstract": "In this paper we develop a new framework that captures the common landscape underlying the common non-convex low-rank matrix problems including matrix sensing, matrix completion and robust PCA. In particular, we show for all above problems (including asymmetric cases): 1) all local minima are also globally optimal; 2) no high-order saddle points exists. These results explain why simple algorithms such as stochastic gradient descent have global converge, and efficiently optimize these non-convex objective functions in practice. Our framework connects and simplifies the existing analyses on optimization landscapes for matrix sensing and symmetric matrix completion. 
The framework naturally leads to new results for asymmetric matrix completion and robust PCA.", "bibtex": "@InProceedings{pmlr-v70-ge17a,\n title = \t {No Spurious Local Minima in Nonconvex Low Rank Problems: A Unified Geometric Analysis},\n author = {Rong Ge and Chi Jin and Yi Zheng},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1233--1242},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/ge17a/ge17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/ge17a.html},\n abstract = \t {In this paper we develop a new framework that captures the common landscape underlying the common non-convex low-rank matrix problems including matrix sensing, matrix completion and robust PCA. In particular, we show for all above problems (including asymmetric cases): 1) all local minima are also globally optimal; 2) no high-order saddle points exists. These results explain why simple algorithms such as stochastic gradient descent have global converge, and efficiently optimize these non-convex objective functions in practice. Our framework connects and simplifies the existing analyses on optimization landscapes for matrix sensing and symmetric matrix completion. The framework naturally leads to new results for asymmetric matrix completion and robust PCA.}\n}", "pdf": "http://proceedings.mlr.press/v70/ge17a/ge17a.pdf", "supp": "", "pdf_size": 370615, "gs_citation": 554, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7908586163390330077&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Duke University; UC Berkeley; Duke University", "aff_domain": "cs.duke.edu;cs.berkeley.edu; ", "email": "cs.duke.edu;cs.berkeley.edu; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/ge17a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Duke University;University of California, Berkeley", "aff_unique_dep": ";", "aff_unique_url": "https://www.duke.edu;https://www.berkeley.edu", "aff_unique_abbr": "Duke;UC Berkeley", "aff_campus_unique_index": "1", "aff_campus_unique": ";Berkeley", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Nonnegative Matrix Factorization for Time Series Recovery From a Few Temporal Aggregates", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/522", "id": "522", "author_site": "Jiali Mei, Yohann De Castro, Yannig Goude, Georges H\u00e9brail", "author": "Jiali Mei; Yohann De Castro; Yannig Goude; Georges H\u00e9brail", "abstract": "Motivated by electricity consumption reconstitution, we propose a new matrix recovery method using nonnegative matrix factorization (NMF). The task tackled here is to reconstitute electricity consumption time series at a fine temporal scale from measures that are temporal aggregates of individual consumption. Contrary to existing NMF algorithms, the proposed method uses temporal aggregates as input data, instead of matrix entries. Furthermore, the proposed method is extended to take into account individual autocorrelation to provide better estimation, using a recent convex relaxation of quadratically constrained quadratic programs. 
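Illustrative sketch for the "No Spurious Local Minima in Nonconvex Low Rank Problems" entry above: plain gradient descent on the factorized matrix-sensing objective f(U) = 1/(2m) * sum_k (<A_k, U U^T> - y_k)^2, which is the kind of non-convex problem the paper's landscape result covers. This is a minimal sketch, not the paper's analysis; all sizes and the step size are illustrative assumptions.

```python
import numpy as np

rng = np.random.default_rng(0)
n, r, m = 20, 2, 600                         # matrix size, rank, number of measurements

U_star = rng.normal(size=(n, r)) / np.sqrt(n)
M_star = U_star @ U_star.T                   # ground-truth low-rank PSD matrix
A = rng.normal(size=(m, n, n))               # Gaussian sensing matrices
y = np.einsum('kij,ij->k', A, M_star)        # measurements y_k = <A_k, M*>

def grad(U):
    """Gradient of f(U) = 1/(2m) * sum_k (<A_k, U U^T> - y_k)^2."""
    resid = np.einsum('kij,ij->k', A, U @ U.T) - y
    G = np.einsum('k,kij->ij', resid, A) / m
    return (G + G.T) @ U                     # since d<A, UU^T>/dU = (A + A^T) U

U = rng.normal(size=(n, r)) / np.sqrt(n)     # plain random initialization
for _ in range(2000):
    U -= 0.1 * grad(U)

print("relative error:", np.linalg.norm(U @ U.T - M_star) / np.linalg.norm(M_star))
```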
Extensive experiments on synthetic and real-world electricity consumption datasets illustrate the effectiveness of the proposed method.", "bibtex": "@InProceedings{pmlr-v70-mei17a,\n title = \t {Nonnegative Matrix Factorization for Time Series Recovery From a Few Temporal Aggregates},\n author = {Jiali Mei and De Castro, Yohann and Yannig Goude and Georges H{\\'e}brail},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2382--2390},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/mei17a/mei17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/mei17a.html},\n abstract = \t {Motivated by electricity consumption reconstitution, we propose a new matrix recovery method using nonnegative matrix factorization (NMF). The task tackled here is to reconstitute electricity consumption time series at a fine temporal scale from measures that are temporal aggregates of individual consumption. Contrary to existing NMF algorithms, the proposed method uses temporal aggregates as input data, instead of matrix entries. Furthermore, the proposed method is extended to take into account individual autocorrelation to provide better estimation, using a recent convex relaxation of quadratically constrained quadratic programs. Extensive experiments on synthetic and real-world electricity consumption datasets illustrate the effectiveness of the proposed method.}\n}", "pdf": "http://proceedings.mlr.press/v70/mei17a/mei17a.pdf", "supp": "", "pdf_size": 322637, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11619129117760936448&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "EDF Lab Paris-Saclay, 91120 Palaiseau, France+LMO, Univ. Paris-Sud, CNRS, Universite Paris-Saclay, 91405 Orsay, France; EDF Lab Paris-Saclay, 91120 Palaiseau, France; EDF Lab Paris-Saclay, 91120 Palaiseau, France+LMO, Univ. Paris-Sud, CNRS, Universite Paris-Saclay, 91405 Orsay, France; LMO, Univ. Paris-Sud, CNRS, Universite Paris-Saclay, 91405 Orsay, France", "aff_domain": "u-psud.fr; ; ; ", "email": "u-psud.fr; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/mei17a.html", "aff_unique_index": "0+1;0;0+1;1", "aff_unique_norm": "EDF Lab Paris-Saclay;Universite Paris-Sud", "aff_unique_dep": ";LMO", "aff_unique_url": "https://www.edf.com;https://www.universite-paris-sud.fr", "aff_unique_abbr": "EDF Lab;UPS", "aff_campus_unique_index": "0+1;0;0+1;1", "aff_campus_unique": "Paris-Saclay;Orsay", "aff_country_unique_index": "0+0;0;0+0;0", "aff_country_unique": "France" }, { "title": "Nonparanormal Information Estimation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/805", "id": "805", "author_site": "Shashank Singh, Barnab\u00e1s P\u00f3czos", "author": "Shashank Singh; Barnab\u00e1s P\u00f3czos", "abstract": "We study the problem of using i.i.d. samples from an unknown multivariate probability distribution p to estimate the mutual information of p. This problem has recently received attention in two settings: (1) where p is assumed to be Gaussian and (2) where p is assumed only to lie in a large nonparametric smoothness class. 
Estimators proposed for the Gaussian case converge in high dimensions when the Gaussian assumption holds, but are brittle, failing dramatically when p is not Gaussian, while estimators proposed for the nonparametric case fail to converge with realistic sample sizes except in very low dimension. Hence, there is a lack of robust mutual information estimators for many realistic data. To address this, we propose estimators for mutual information when p is assumed to be a nonparanormal (or Gaussian copula) model, a semiparametric compromise between Gaussian and nonparametric extremes. Using theoretical bounds and experiments, we show these estimators strike a practical balance between robustness and scalability.", "bibtex": "@InProceedings{pmlr-v70-singh17a,\n title = \t {Nonparanormal Information Estimation},\n author = {Shashank Singh and Barnab{\\'a}s P{\\'o}czos},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3210--3219},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/singh17a/singh17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/singh17a.html},\n abstract = \t {We study the problem of using i.i.d. samples from an unknown multivariate probability distribution p to estimate the mutual information of p. This problem has recently received attention in two settings: (1) where p is assumed to be Gaussian and (2) where p is assumed only to lie in a large nonparametric smoothness class. Estimators proposed for the Gaussian case converge in high dimensions when the Gaussian assumption holds, but are brittle, failing dramatically when p is not Gaussian, while estimators proposed for the nonparametric case fail to converge with realistic sample sizes except in very low dimension. Hence, there is a lack of robust mutual information estimators for many realistic data. To address this, we propose estimators for mutual information when p is assumed to be a nonparanormal (or Gaussian copula) model, a semiparametric compromise between Gaussian and nonparametric extremes. 
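Illustrative sketch for the "Nonparanormal Information Estimation" entry above, assuming the simple "normal scores" variant of a Gaussian-copula mutual-information estimator: map each marginal to Gaussian scores via its empirical CDF, then plug the resulting correlation matrix into the closed-form Gaussian MI. Function names and the toy data are assumptions for illustration, not the paper's exact estimator.

```python
import numpy as np
from scipy.stats import norm, rankdata

def normal_scores(X):
    """Map each column of X to standard-normal scores via its empirical CDF."""
    n = X.shape[0]
    U = rankdata(X, axis=0) / (n + 1)             # ranks rescaled into (0, 1)
    return norm.ppf(U)

def nonparanormal_mi(X, Y):
    """Estimate I(X; Y) in nats under a Gaussian copula (nonparanormal) model."""
    Z = normal_scores(np.hstack([X, Y]))
    R = np.corrcoef(Z, rowvar=False)
    dx = X.shape[1]
    _, logdet = np.linalg.slogdet(R)
    _, logdet_x = np.linalg.slogdet(R[:dx, :dx])
    _, logdet_y = np.linalg.slogdet(R[dx:, dx:])
    return 0.5 * (logdet_x + logdet_y - logdet)   # Gaussian MI formula

# Toy check on correlated Gaussians, where the true MI is -0.5 * log(1 - rho^2).
rng = np.random.default_rng(0)
rho = 0.8
xy = rng.multivariate_normal([0, 0], [[1, rho], [rho, 1]], size=5000)
print(nonparanormal_mi(xy[:, :1], xy[:, 1:]), -0.5 * np.log(1 - rho**2))
```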
Using theoretical bounds and experiments, we show these estimators strike a practical balance between robustness and scalability.}\n}", "pdf": "http://proceedings.mlr.press/v70/singh17a/singh17a.pdf", "supp": "", "pdf_size": 387549, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3571364177100782216&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Carnegie Mellon University; Carnegie Mellon University", "aff_domain": "andrew.cmu.edu; ", "email": "andrew.cmu.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/singh17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Nystr\u00f6m Method with Kernel K-means++ Samples as Landmarks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/595", "id": "595", "author_site": "Dino Oglic, Thomas Gaertner", "author": "Dino Oglic; Thomas G\u00e4rtner", "abstract": "We investigate, theoretically and empirically, the effectiveness of kernel K-means++ samples as landmarks in the Nystr\u00f6m method for low-rank approximation of kernel matrices. Previous empirical studies (Zhang et al., 2008; Kumar et al.,2012) observe that the landmarks obtained using (kernel) K-means clustering define a good low-rank approximation of kernel matrices. However, the existing work does not provide a theoretical guarantee on the approximation error for this approach to landmark selection. We close this gap and provide the first bound on the approximation error of the Nystr\u00f6m method with kernel K-means++ samples as landmarks. Moreover, for the frequently used Gaussian kernel we provide a theoretically sound motivation for performing Lloyd refinements of kernel K-means++ landmarks in the instance space. We substantiate our theoretical results empirically by comparing the approach to several state-of-the-art algorithms.", "bibtex": "@InProceedings{pmlr-v70-oglic17a,\n title = \t {{N}ystr{\\\"o}m Method with Kernel K-means++ Samples as Landmarks},\n author = {Dino Oglic and Thomas G{\\\"a}rtner},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2652--2660},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/oglic17a/oglic17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/oglic17a.html},\n abstract = \t {We investigate, theoretically and empirically, the effectiveness of kernel K-means++ samples as landmarks in the Nystr\u00f6m method for low-rank approximation of kernel matrices. Previous empirical studies (Zhang et al., 2008; Kumar et al.,2012) observe that the landmarks obtained using (kernel) K-means clustering define a good low-rank approximation of kernel matrices. However, the existing work does not provide a theoretical guarantee on the approximation error for this approach to landmark selection. We close this gap and provide the first bound on the approximation error of the Nystr\u00f6m method with kernel K-means++ samples as landmarks. 
Moreover, for the frequently used Gaussian kernel we provide a theoretically sound motivation for performing Lloyd refinements of kernel K-means++ landmarks in the instance space. We substantiate our theoretical results empirically by comparing the approach to several state-of-the-art algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v70/oglic17a/oglic17a.pdf", "supp": "", "pdf_size": 425361, "gs_citation": 51, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10432520224825991828&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Institut f\u00fcr Informatik III, Universit\u00e4t Bonn, Germany+School of Computer Science, The University of Nottingham, United Kingdom; School of Computer Science, The University of Nottingham, United Kingdom", "aff_domain": "uni-bonn.de; ", "email": "uni-bonn.de; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/oglic17a.html", "aff_unique_index": "0+1;1", "aff_unique_norm": "Universit\u00e4t Bonn;University of Nottingham", "aff_unique_dep": "Institut f\u00fcr Informatik III;School of Computer Science", "aff_unique_url": "https://www.uni-bonn.de;https://www.nottingham.ac.uk", "aff_unique_abbr": ";Nottingham", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+1;1", "aff_country_unique": "Germany;United Kingdom" }, { "title": "On Approximation Guarantees for Greedy Low Rank Optimization", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/877", "id": "877", "author_site": "RAJIV KHANNA, Ethan R. Elenberg, Alexandros Dimakis, Joydeep Ghosh, Sahand Negahban", "author": "Rajiv Khanna; Ethan R. Elenberg; Alexandros G. Dimakis; Joydeep Ghosh; Sahand Negahban", "abstract": "We provide new approximation guarantees for greedy low rank matrix estimation under standard assumptions of restricted strong convexity and smoothness. Our novel analysis also uncovers previously unknown connections between the low rank estimation and combinatorial optimization, so much so that our bounds are reminiscent of corresponding approximation bounds in submodular maximization. Additionally, we also provide statistical recovery guarantees. Finally, we present empirical comparison of greedy estimation with established baselines on two important real-world problems.", "bibtex": "@InProceedings{pmlr-v70-khanna17a,\n title = \t {On Approximation Guarantees for Greedy Low Rank Optimization},\n author = {Rajiv Khanna and Ethan R. Elenberg and Alexandros G. Dimakis and Joydeep Ghosh and Sahand Negahban},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1837--1846},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/khanna17a/khanna17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/khanna17a.html},\n abstract = \t {We provide new approximation guarantees for greedy low rank matrix estimation under standard assumptions of restricted strong convexity and smoothness. Our novel analysis also uncovers previously unknown connections between the low rank estimation and combinatorial optimization, so much so that our bounds are reminiscent of corresponding approximation bounds in submodular maximization. Additionally, we also provide statistical recovery guarantees. 
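Illustrative sketch for the "Nyström Method with Kernel K-means++ Samples as Landmarks" entry above: D^2-style kernel k-means++ sampling of landmarks in the RBF feature space, followed by the standard Nyström approximation K ≈ C W^+ C^T. This is a minimal sketch under assumed kernel parameters, not the paper's exact procedure (no Lloyd refinements).

```python
import numpy as np

def rbf(A, B, gamma=0.5):
    d2 = ((A[:, None, :] - B[None, :, :]) ** 2).sum(-1)
    return np.exp(-gamma * d2)

def kernel_kmeanspp_landmarks(X, m, gamma=0.5, rng=None):
    if rng is None:
        rng = np.random.default_rng(0)
    idx = [rng.integers(len(X))]                          # first landmark uniformly at random
    # squared kernel distance to the current landmark set; k(x, x) = 1 for the RBF kernel
    d2 = 2.0 - 2.0 * rbf(X, X[idx], gamma).ravel()
    for _ in range(m - 1):
        new = rng.choice(len(X), p=d2 / d2.sum())         # D^2-sampling step
        idx.append(new)
        d2 = np.minimum(d2, 2.0 - 2.0 * rbf(X, X[[new]], gamma).ravel())
    return np.array(idx)

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 5))
landmarks = kernel_kmeanspp_landmarks(X, m=30)

C = rbf(X, X[landmarks])                                  # n x m cross-kernel block
W = rbf(X[landmarks], X[landmarks])                       # m x m landmark kernel block
K_approx = C @ np.linalg.pinv(W) @ C.T
K = rbf(X, X)
print("relative Frobenius error:", np.linalg.norm(K - K_approx) / np.linalg.norm(K))
```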
Finally, we present empirical comparison of greedy estimation with established baselines on two important real-world problems.}\n}", "pdf": "http://proceedings.mlr.press/v70/khanna17a/khanna17a.pdf", "supp": "", "pdf_size": 1567857, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4886205846849635827&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "UT Austin; UT Austin; UT Austin; UT Austin; Yale University", "aff_domain": "utexas.edu; ; ; ; ", "email": "utexas.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/khanna17a.html", "aff_unique_index": "0;0;0;0;1", "aff_unique_norm": "University of Texas at Austin;Yale University", "aff_unique_dep": ";", "aff_unique_url": "https://www.utexas.edu;https://www.yale.edu", "aff_unique_abbr": "UT Austin;Yale", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Austin;", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "On Calibration of Modern Neural Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/808", "id": "808", "author_site": "Chuan Guo, Geoff Pleiss, Yu Sun, Kilian Weinberger", "author": "Chuan Guo; Geoff Pleiss; Yu Sun; Kilian Q. Weinberger", "abstract": "Confidence calibration \u2013 the problem of predicting probability estimates representative of the true correctness likelihood \u2013 is important for classification models in many applications. We discover that modern neural networks, unlike those from a decade ago, are poorly calibrated. Through extensive experiments, we observe that depth, width, weight decay, and Batch Normalization are important factors influencing calibration. We evaluate the performance of various post-processing calibration methods on state-of-the-art architectures with image and document classification datasets. Our analysis and experiments not only offer insights into neural network learning, but also provide a simple and straightforward recipe for practical settings: on most datasets, temperature scaling \u2013 a single-parameter variant of Platt Scaling \u2013 is surprisingly effective at calibrating predictions.", "bibtex": "@InProceedings{pmlr-v70-guo17a,\n title = \t {On Calibration of Modern Neural Networks},\n author = {Chuan Guo and Geoff Pleiss and Yu Sun and Kilian Q. Weinberger},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1321--1330},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/guo17a/guo17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/guo17a.html},\n abstract = \t {Confidence calibration \u2013 the problem of predicting probability estimates representative of the true correctness likelihood \u2013 is important for classification models in many applications. We discover that modern neural networks, unlike those from a decade ago, are poorly calibrated. Through extensive experiments, we observe that depth, width, weight decay, and Batch Normalization are important factors influencing calibration. We evaluate the performance of various post-processing calibration methods on state-of-the-art architectures with image and document classification datasets. 
Our analysis and experiments not only offer insights into neural network learning, but also provide a simple and straightforward recipe for practical settings: on most datasets, temperature scaling \u2013 a single-parameter variant of Platt Scaling \u2013 is surprisingly effective at calibrating predictions.}\n}", "pdf": "http://proceedings.mlr.press/v70/guo17a/guo17a.pdf", "supp": "", "pdf_size": 828420, "gs_citation": 7417, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13350219683390288487&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 8, "aff": "Cornell University; Cornell University; Cornell University; Cornell University", "aff_domain": "cornell.edu;cs.cornell.edu;cornell.edu; ", "email": "cornell.edu;cs.cornell.edu;cornell.edu; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/guo17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Cornell University", "aff_unique_dep": "", "aff_unique_url": "https://www.cornell.edu", "aff_unique_abbr": "Cornell", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "On Context-Dependent Clustering of Bandits", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/583", "id": "583", "author_site": "Claudio Gentile, Shuai Li, Purushottam Kar, Alexandros Karatzoglou, Giovanni Zappella, Evans Etrue Howard", "author": "Claudio Gentile; Shuai Li; Purushottam Kar; Alexandros Karatzoglou; Giovanni Zappella; Evans Etrue", "abstract": "We investigate a novel cluster-of-bandit algorithm CAB for collaborative recommendation tasks that implements the underlying feedback sharing mechanism by estimating user neighborhoods in a context-dependent manner. CAB makes sharp departures from the state of the art by incorporating collaborative effects into inference, as well as learning processes in a manner that seamlessly interleaves explore-exploit tradeoffs and collaborative steps. We prove regret bounds for CAB under various data-dependent assumptions which exhibit a crisp dependence on the expected number of clusters over the users, a natural measure of the statistical difficulty of the learning task. Experiments on production and real-world datasets show that CAB offers significantly increased prediction performance against a representative pool of state-of-the-art methods.", "bibtex": "@InProceedings{pmlr-v70-gentile17a,\n title = \t {On Context-Dependent Clustering of Bandits},\n author = {Claudio Gentile and Shuai Li and Purushottam Kar and Alexandros Karatzoglou and Giovanni Zappella and Evans Etrue},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1253--1262},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/gentile17a/gentile17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/gentile17a.html},\n abstract = \t {We investigate a novel cluster-of-bandit algorithm CAB for collaborative recommendation tasks that implements the underlying feedback sharing mechanism by estimating user neighborhoods in a context-dependent manner. 
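Illustrative sketch for the "On Calibration of Modern Neural Networks" entry above: temperature scaling fits a single scalar T on held-out validation logits by minimizing the negative log-likelihood, then divides test-time logits by T. The logits and labels below are synthetic placeholders, not data from the paper.

```python
import numpy as np
from scipy.optimize import minimize_scalar

def nll(logits, labels, T):
    """Average negative log-likelihood of temperature-scaled logits."""
    z = logits / T
    z = z - z.max(axis=1, keepdims=True)                  # stabilized log-softmax
    log_probs = z - np.log(np.exp(z).sum(axis=1, keepdims=True))
    return -log_probs[np.arange(len(labels)), labels].mean()

def fit_temperature(val_logits, val_labels):
    res = minimize_scalar(lambda T: nll(val_logits, val_labels, T),
                          bounds=(0.05, 10.0), method='bounded')
    return res.x

# Synthetic "overconfident" validation set: correct-class logit inflated, then rescaled.
rng = np.random.default_rng(0)
val_labels = rng.integers(0, 10, size=2000)
val_logits = rng.normal(size=(2000, 10))
val_logits[np.arange(2000), val_labels] += 3.0
val_logits *= 4.0                                         # exaggerated confidence

T = fit_temperature(val_logits, val_labels)
print("fitted temperature:", T)                           # a value > 1 softens the softmax
```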
CAB makes sharp departures from the state of the art by incorporating collaborative effects into inference, as well as learning processes in a manner that seamlessly interleaves explore-exploit tradeoffs and collaborative steps. We prove regret bounds for CAB under various data-dependent assumptions which exhibit a crisp dependence on the expected number of clusters over the users, a natural measure of the statistical difficulty of the learning task. Experiments on production and real-world datasets show that CAB offers significantly increased prediction performance against a representative pool of state-of-the-art methods.}\n}", "pdf": "http://proceedings.mlr.press/v70/gentile17a/gentile17a.pdf", "supp": "", "pdf_size": 547919, "gs_citation": 168, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17481356457368604545&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "DiSTA, University of Insubria, Italy; University of Cambridge, United Kingdom; IIT Kanpur, India; Telefonica Research, Spain; Amazon Dev Center, Germany+University of Milan, Italy; DiSTA, University of Insubria, Italy", "aff_domain": "uninsubria.it; ; ; ; ; ", "email": "uninsubria.it; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/gentile17a.html", "aff_unique_index": "0;1;2;3;4+5;0", "aff_unique_norm": "University of Insubria;University of Cambridge;Indian Institute of Technology Kanpur;Telefonica Research;Amazon;University of Milan", "aff_unique_dep": "DiSTA;;;;Amazon Dev Center;", "aff_unique_url": "https://www.uninsubria.it;https://www.cam.ac.uk;https://www.iitk.ac.in;https://www.telefonica.com/research;https://www.amazon.de;https://www.unimi.it", "aff_unique_abbr": ";Cambridge;IITK;Telefonica Research;Amazon;UniMi", "aff_campus_unique_index": "1;", "aff_campus_unique": ";Cambridge", "aff_country_unique_index": "0;1;2;3;4+0;0", "aff_country_unique": "Italy;United Kingdom;India;Spain;Germany" }, { "title": "On Kernelized Multi-armed Bandits", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/779", "id": "779", "author_site": "Sayak Ray Chowdhury, Aditya Gopalan", "author": "Sayak Ray Chowdhury; Aditya Gopalan", "abstract": "We consider the stochastic bandit problem with a continuous set of arms, with the expected reward function over the arms assumed to be fixed but unknown. We provide two new Gaussian process-based algorithms for continuous bandit optimization \u2013 Improved GP-UCB (IGP-UCB) and GP-Thomson sampling (GP-TS), and derive corresponding regret bounds. Specifically, the bounds hold when the expected reward function belongs to the reproducing kernel Hilbert space (RKHS) that naturally corresponds to a Gaussian process kernel used as input by the algorithms. Along the way, we derive a new self-normalized concentration inequality for vector-valued martingales of arbitrary, possibly infinite, dimension. 
Finally, experimental evaluation and comparisons to existing algorithms on synthetic and real-world environments are carried out that highlight the favourable gains of the proposed strategies in many cases.", "bibtex": "@InProceedings{pmlr-v70-chowdhury17a,\n title = \t {On Kernelized Multi-armed Bandits},\n author = {Sayak Ray Chowdhury and Aditya Gopalan},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {844--853},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/chowdhury17a/chowdhury17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/chowdhury17a.html},\n abstract = \t {We consider the stochastic bandit problem with a continuous set of arms, with the expected reward function over the arms assumed to be fixed but unknown. We provide two new Gaussian process-based algorithms for continuous bandit optimization \u2013 Improved GP-UCB (IGP-UCB) and GP-Thomson sampling (GP-TS), and derive corresponding regret bounds. Specifically, the bounds hold when the expected reward function belongs to the reproducing kernel Hilbert space (RKHS) that naturally corresponds to a Gaussian process kernel used as input by the algorithms. Along the way, we derive a new self-normalized concentration inequality for vector-valued martingales of arbitrary, possibly infinite, dimension. Finally, experimental evaluation and comparisons to existing algorithms on synthetic and real-world environments are carried out that highlight the favourable gains of the proposed strategies in many cases.}\n}", "pdf": "http://proceedings.mlr.press/v70/chowdhury17a/chowdhury17a.pdf", "supp": "", "pdf_size": 403915, "gs_citation": 558, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1022652599500519205&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Electrical Communication Engineering, Indian Institute of Science, Bengaluru, 560012, India; Department of Electrical Communication Engineering, Indian Institute of Science, Bengaluru, 560012, India", "aff_domain": "ece.iisc.ernet.in; ", "email": "ece.iisc.ernet.in; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/chowdhury17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Indian Institute of Science", "aff_unique_dep": "Department of Electrical Communication Engineering", "aff_unique_url": "https://www.iisc.ac.in", "aff_unique_abbr": "IISc", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Bengaluru", "aff_country_unique_index": "0;0", "aff_country_unique": "India" }, { "title": "On Mixed Memberships and Symmetric Nonnegative Matrix Factorizations", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/602", "id": "602", "author_site": "Xueyu Mao, Purnamrita Sarkar, Deepayan Chakrabarti", "author": "Xueyu Mao; Purnamrita Sarkar; Deepayan Chakrabarti", "abstract": "The problem of finding overlapping communities in networks has gained much attention recently. Optimization-based approaches use non-negative matrix factorization (NMF) or variants, but the global optimum cannot be provably attained in general. 
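Illustrative sketch in the spirit of the "On Kernelized Multi-armed Bandits" entry above: a GP-UCB-style loop over a one-dimensional grid of arms, choosing the arm that maximizes posterior mean plus an exploration bonus. The constant exploration weight below is an assumption; the paper's IGP-UCB confidence width is not reproduced here.

```python
import numpy as np

def rbf(a, b, ell=0.2):
    return np.exp(-0.5 * (a[:, None] - b[None, :]) ** 2 / ell ** 2)

rng = np.random.default_rng(0)
arms = np.linspace(0, 1, 200)
f = lambda x: np.sin(6 * x) + 0.5 * np.cos(11 * x)        # unknown reward function
noise, beta = 0.1, 2.0                                    # beta: exploration weight (assumed)

X, y = [], []
for t in range(100):
    if not X:
        mu, sd = np.zeros_like(arms), np.ones_like(arms)
    else:
        Xa = np.array(X)
        K = rbf(Xa, Xa) + noise**2 * np.eye(len(Xa))
        k_star = rbf(arms, Xa)
        mu = k_star @ np.linalg.solve(K, np.array(y))     # GP posterior mean
        var = 1.0 - np.einsum('ij,ij->i', k_star, np.linalg.solve(K, k_star.T).T)
        sd = np.sqrt(np.clip(var, 1e-12, None))           # GP posterior std dev
    x = arms[np.argmax(mu + beta * sd)]                   # optimistic (UCB) arm choice
    X.append(x)
    y.append(f(x) + noise * rng.normal())

print("best arm found:", X[np.argmax(y)])
```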
Model-based approaches, such as the popular mixed-membership stochastic blockmodel or MMSB (Airoldi et al., 2008), use parameters for each node to specify the overlapping communities, but standard inference techniques cannot guarantee consistency. We link the two approaches, by (a) establishing sufficient conditions for the symmetric NMF optimization to have a unique solution under MMSB, and (b) proposing a computationally efficient algorithm called GeoNMF that is provably optimal and hence consistent for a broad parameter regime. We demonstrate its accuracy on both simulated and real-world datasets.", "bibtex": "@InProceedings{pmlr-v70-mao17a,\n title = \t {On Mixed Memberships and Symmetric Nonnegative Matrix Factorizations},\n author = {Xueyu Mao and Purnamrita Sarkar and Deepayan Chakrabarti},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2324--2333},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/mao17a/mao17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/mao17a.html},\n abstract = \t {The problem of finding overlapping communities in networks has gained much attention recently. Optimization-based approaches use non-negative matrix factorization (NMF) or variants, but the global optimum cannot be provably attained in general. Model-based approaches, such as the popular mixed-membership stochastic blockmodel or MMSB (Airoldi et al., 2008), use parameters for each node to specify the overlapping communities, but standard inference techniques cannot guarantee consistency. We link the two approaches, by (a) establishing sufficient conditions for the symmetric NMF optimization to have a unique solution under MMSB, and (b) proposing a computationally efficient algorithm called GeoNMF that is provably optimal and hence consistent for a broad parameter regime. 
We demonstrate its accuracy on both simulated and real-world datasets.}\n}", "pdf": "http://proceedings.mlr.press/v70/mao17a/mao17a.pdf", "supp": "", "pdf_size": 444313, "gs_citation": 62, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17373280921383688563&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science; Department of Statistics and Data Sciences; Department of Information, Risk, and Operations Management", "aff_domain": "cs.utexas.edu;austin.utexas.edu;utexas.edu", "email": "cs.utexas.edu;austin.utexas.edu;utexas.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/mao17a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Unknown Institution;University of Texas at Austin;University of Florida", "aff_unique_dep": "Department of Computer Science;Department of Statistics and Data Sciences;Department of Information, Risk, and Operations Management", "aff_unique_url": ";https://www.stat.utexas.edu;https://warrington.ufl.edu/", "aff_unique_abbr": ";UT Austin;", "aff_campus_unique_index": "1", "aff_campus_unique": ";Austin", "aff_country_unique_index": "1;1", "aff_country_unique": ";United States" }, { "title": "On Relaxing Determinism in Arithmetic Circuits", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/815", "id": "815", "author_site": "Arthur Choi, Adnan Darwiche", "author": "Arthur Choi; Adnan Darwiche", "abstract": "The past decade has seen a significant interest in learning tractable probabilistic representations. Arithmetic circuits (ACs) were among the first proposed tractable representations, with some subsequent representations being instances of ACs with weaker or stronger properties. In this paper, we provide a formal basis under which variants on ACs can be compared, and where the precise roles and semantics of their various properties can be made more transparent. This allows us to place some recent developments on ACs in a clearer perspective and to also derive new results for ACs. This includes an exponential separation between ACs with and without determinism; completeness and incompleteness results; and tractability results (or lack thereof) when computing most probable explanations (MPEs).", "bibtex": "@InProceedings{pmlr-v70-choi17a,\n title = \t {On Relaxing Determinism in Arithmetic Circuits},\n author = {Arthur Choi and Adnan Darwiche},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {825--833},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/choi17a/choi17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/choi17a.html},\n abstract = \t {The past decade has seen a significant interest in learning tractable probabilistic representations. Arithmetic circuits (ACs) were among the first proposed tractable representations, with some subsequent representations being instances of ACs with weaker or stronger properties. In this paper, we provide a formal basis under which variants on ACs can be compared, and where the precise roles and semantics of their various properties can be made more transparent. This allows us to place some recent developments on ACs in a clearer perspective and to also derive new results for ACs. 
This includes an exponential separation between ACs with and without determinism; completeness and incompleteness results; and tractability results (or lack thereof) when computing most probable explanations (MPEs).}\n}", "pdf": "http://proceedings.mlr.press/v70/choi17a/choi17a.pdf", "supp": "", "pdf_size": 320303, "gs_citation": 74, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10151427635948258068&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "University of California, Los Angeles, California, USA; University of California, Los Angeles, California, USA", "aff_domain": "cs.ucla.edu;cs.ucla.edu", "email": "cs.ucla.edu;cs.ucla.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/choi17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Los Angeles", "aff_unique_dep": "", "aff_unique_url": "https://www.ucla.edu", "aff_unique_abbr": "UCLA", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Los Angeles", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "On The Projection Operator to A Three-view Cardinality Constrained Set", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/803", "id": "803", "author_site": "Haichuan Yang, Shupeng Gui, Chuyang Ke, Daniel Stefankovic, Ryohei Fujimaki, Ji Liu", "author": "Haichuan Yang; Shupeng Gui; Chuyang Ke; Daniel Stefankovic; Ryohei Fujimaki; Ji Liu", "abstract": "The cardinality constraint is an intrinsic way to restrict the solution structure in many domains, for example, sparse learning, feature selection, and compressed sensing. To solve a cardinality constrained problem, the key challenge is to solve the projection onto the cardinality constraint set, which is NP-hard in general when there exist multiple overlapped cardinality constraints. In this paper, we consider the scenario where the overlapped cardinality constraints satisfy a Three-view Cardinality Structure (TVCS), which reflects the natural restriction in many applications, such as identification of gene regulatory networks and task-worker assignment problem. We cast the projection into a linear programming, and show that for TVCS, the vertex solution of this linear programming is the solution for the original projection problem. We further prove that such solution can be found with the complexity proportional to the number of variables and constraints. We finally use synthetic experiments and two interesting applications in bioinformatics and crowdsourcing to validate the proposed TVCS model and method.", "bibtex": "@InProceedings{pmlr-v70-yang17c,\n title = \t {On The Projection Operator to A Three-view Cardinality Constrained Set},\n author = {Haichuan Yang and Shupeng Gui and Chuyang Ke and Daniel Stefankovic and Ryohei Fujimaki and Ji Liu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3871--3880},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/yang17c/yang17c.pdf},\n url = \t {https://proceedings.mlr.press/v70/yang17c.html},\n abstract = \t {The cardinality constraint is an intrinsic way to restrict the solution structure in many domains, for example, sparse learning, feature selection, and compressed sensing. 
To solve a cardinality constrained problem, the key challenge is to solve the projection onto the cardinality constraint set, which is NP-hard in general when there exist multiple overlapped cardinality constraints. In this paper, we consider the scenario where the overlapped cardinality constraints satisfy a Three-view Cardinality Structure (TVCS), which reflects the natural restriction in many applications, such as identification of gene regulatory networks and task-worker assignment problem. We cast the projection into a linear programming, and show that for TVCS, the vertex solution of this linear programming is the solution for the original projection problem. We further prove that such solution can be found with the complexity proportional to the number of variables and constraints. We finally use synthetic experiments and two interesting applications in bioinformatics and crowdsourcing to validate the proposed TVCS model and method.}\n}", "pdf": "http://proceedings.mlr.press/v70/yang17c/yang17c.pdf", "supp": "", "pdf_size": 2035208, "gs_citation": 0, "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:sS1NpNQndtgJ:scholar.google.com/&scioq=On+The+Projection+Operator+to+A+Three-view+Cardinality+Constrained+Set&hl=en&as_sdt=0,14", "gs_version_total": 6, "aff": "University of Rochester; University of Rochester; University of Rochester; University of Rochester; NEC; University of Rochester", "aff_domain": "rochester.edu; ; ; ; ; ", "email": "rochester.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/yang17c.html", "aff_unique_index": "0;0;0;0;1;0", "aff_unique_norm": "University of Rochester;NEC Corporation", "aff_unique_dep": ";", "aff_unique_url": "https://www.rochester.edu;https://www.nec.com", "aff_unique_abbr": "U of R;NEC", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;1;0", "aff_country_unique": "United States;Japan" }, { "title": "On orthogonality and learning recurrent networks with long term dependencies", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/740", "id": "740", "author_site": "Eugene Vorontsov, Chiheb Trabelsi, Christopher Pal, Samuel Kadoury", "author": "Eugene Vorontsov; Chiheb Trabelsi; Samuel Kadoury; Chris Pal", "abstract": "It is well known that it is challenging to train deep neural networks and recurrent neural networks for tasks that exhibit long term dependencies. The vanishing or exploding gradient problem is a well known issue associated with these challenges. One approach to addressing vanishing and exploding gradients is to use either soft or hard constraints on weight matrices so as to encourage or enforce orthogonality. Orthogonal matrices preserve gradient norm during backpropagation and may therefore be a desirable property. This paper explores issues with optimization convergence, speed and gradient stability when encouraging or enforcing orthogonality. To perform this analysis, we propose a weight matrix factorization and parameterization strategy through which we can bound matrix norms and therein control the degree of expansivity induced during backpropagation. 
We find that hard constraints on orthogonality can negatively affect the speed of convergence and model performance.", "bibtex": "@InProceedings{pmlr-v70-vorontsov17a,\n title = \t {On orthogonality and learning recurrent networks with long term dependencies},\n author = {Eugene Vorontsov and Chiheb Trabelsi and Samuel Kadoury and Chris Pal},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3570--3578},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/vorontsov17a/vorontsov17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/vorontsov17a.html},\n abstract = \t {It is well known that it is challenging to train deep neural networks and recurrent neural networks for tasks that exhibit long term dependencies. The vanishing or exploding gradient problem is a well known issue associated with these challenges. One approach to addressing vanishing and exploding gradients is to use either soft or hard constraints on weight matrices so as to encourage or enforce orthogonality. Orthogonal matrices preserve gradient norm during backpropagation and may therefore be a desirable property. This paper explores issues with optimization convergence, speed and gradient stability when encouraging or enforcing orthogonality. To perform this analysis, we propose a weight matrix factorization and parameterization strategy through which we can bound matrix norms and therein control the degree of expansivity induced during backpropagation. We find that hard constraints on orthogonality can negatively affect the speed of convergence and model performance.}\n}", "pdf": "http://proceedings.mlr.press/v70/vorontsov17a/vorontsov17a.pdf", "supp": "", "pdf_size": 4232927, "gs_citation": 281, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12745127873332927893&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "\u00c9cole Polytechnique de Montr\u00e9al + Montreal Institute for Learning Algorithms; \u00c9cole Polytechnique de Montr\u00e9al + Montreal Institute for Learning Algorithms; \u00c9cole Polytechnique de Montr\u00e9al + CHUM Research Center; \u00c9cole Polytechnique de Montr\u00e9al + Montreal Institute for Learning Algorithms", "aff_domain": "gmail.com; ; ; ", "email": "gmail.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/vorontsov17a.html", "aff_unique_index": "0+1;0+1;0+2;0+1", "aff_unique_norm": "Ecole Polytechnique de Montr\u00e9al;Montreal Institute for Learning Algorithms;CHUM Research Center", "aff_unique_dep": ";;", "aff_unique_url": "https://www.polymtl.ca;https://mila.quebec;https://crchum.ca", "aff_unique_abbr": "Polytechnique Montr\u00e9al;MILA;", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Montr\u00e9al;", "aff_country_unique_index": "0+0;0+0;0+0;0+0", "aff_country_unique": "Canada" }, { "title": "On the Expressive Power of Deep Neural Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/841", "id": "841", "author_site": "Maithra Raghu, Ben Poole, Surya Ganguli, Jon Kleinberg, Jascha Sohl-Dickstein", "author": "Maithra Raghu; Ben Poole; Jon Kleinberg; Surya Ganguli; Jascha Sohl-Dickstein", "abstract": "We propose a new approach to the problem of neural network expressivity, which seeks to characterize how structural 
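Illustrative sketch for the "On orthogonality and learning recurrent networks" entry above: the soft orthogonality regularizer lambda * ||W^T W - I||_F^2 and its gradient, verified with a finite-difference check. This is a minimal sketch of the generic penalty discussed in the abstract, not the paper's factorization-based parameterization; names and sizes are assumptions.

```python
import numpy as np

def orthogonality_penalty(W, lam=1e-2):
    """Soft orthogonality regularizer lam * ||W^T W - I||_F^2 and its gradient."""
    R = W.T @ W - np.eye(W.shape[1])
    return lam * np.sum(R ** 2), lam * 4.0 * W @ R        # grad of ||W^T W - I||_F^2 is 4 W (W^T W - I)

rng = np.random.default_rng(0)
W = rng.normal(size=(32, 32)) / np.sqrt(32)

# Finite-difference check of the gradient on one coordinate.
value, grad = orthogonality_penalty(W)
eps, E = 1e-5, np.zeros_like(W)
E[3, 5] = 1.0
numeric = (orthogonality_penalty(W + eps * E)[0] -
           orthogonality_penalty(W - eps * E)[0]) / (2 * eps)
print(grad[3, 5], numeric)                                # the two values should agree closely
```

In training, this penalty term would simply be added to the task loss so the recurrent weight matrix is encouraged, rather than forced, to stay near orthogonal.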
properties of a neural network family affect the functions it is able to compute. Our approach is based on an interrelated set of measures of expressivity, unified by the novel notion of trajectory length, which measures how the output of a network changes as the input sweeps along a one-dimensional path. Our findings show that: (1) The complexity of the computed function grows exponentially with depth (2) All weights are not equal: trained networks are more sensitive to their lower (initial) layer weights (3) Trajectory regularization is a simpler alternative to batch normalization, with the same performance.", "bibtex": "@InProceedings{pmlr-v70-raghu17a,\n title = \t {On the Expressive Power of Deep Neural Networks},\n author = {Maithra Raghu and Ben Poole and Jon Kleinberg and Surya Ganguli and Jascha Sohl-Dickstein},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2847--2854},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/raghu17a/raghu17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/raghu17a.html},\n abstract = \t {We propose a new approach to the problem of neural network expressivity, which seeks to characterize how structural properties of a neural network family affect the functions it is able to compute. Our approach is based on an interrelated set of measures of expressivity, unified by the novel notion of trajectory length, which measures how the output of a network changes as the input sweeps along a one-dimensional path. Our findings show that: (1) The complexity of the computed function grows exponentially with depth (2) All weights are not equal: trained networks are more sensitive to their lower (initial) layer weights (3) Trajectory regularization is a simpler alternative to batch normalization, with the same performance.}\n}", "pdf": "http://proceedings.mlr.press/v70/raghu17a/raghu17a.pdf", "supp": "", "pdf_size": 0, "gs_citation": 1057, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6162680448928462350&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": ";;;;", "aff_domain": ";;;;", "email": ";;;;", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/raghu17a.html" }, { "title": "On the Iteration Complexity of Support Recovery via Hard Thresholding Pursuit", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/663", "id": "663", "author_site": "Jie Shen, Ping Li", "author": "Jie Shen; Ping Li", "abstract": "Recovering the support of a sparse signal from its compressed samples has been one of the most important problems in high dimensional statistics. In this paper, we present a novel analysis for the hard thresholding pursuit (HTP) algorithm, showing that it exactly recovers the support of an arbitrary s-sparse signal within O(sklogk) iterations via a properly chosen proxy function, where k is the condition number of the problem. In stark contrast to the theoretical results in the literature, the iteration complexity we obtained holds without assuming the restricted isometry property, or relaxing the sparsity, or utilizing the optimality of the underlying signal. We further extend our result to a more challenging scenario, where the subproblem involved in HTP cannot be solved exactly. 
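Illustrative sketch for the "On the Expressive Power of Deep Neural Networks" entry above: the trajectory-length measure, computed by sweeping the input along a one-dimensional path (here a circle), pushing it through a random ReLU network, and reporting the arc length of the hidden trajectory at each depth. Widths, weight variance, and the path are illustrative assumptions.

```python
import numpy as np

def trajectory_length(points):
    """Arc length of a polyline given as an array of consecutive points."""
    return np.linalg.norm(np.diff(points, axis=0), axis=1).sum()

rng = np.random.default_rng(0)
width, sigma_w = 100, 2.0
t = np.linspace(0, 2 * np.pi, 2000)
path = np.stack([np.cos(t), np.sin(t)], axis=1)           # unit circle in R^2

h, fan_in = path, 2
for depth in range(1, 7):
    W = rng.normal(scale=sigma_w / np.sqrt(fan_in), size=(fan_in, width))
    h = np.maximum(h @ W, 0.0)                            # one random ReLU layer
    fan_in = width
    print("depth", depth, "trajectory length", trajectory_length(h))
```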
We prove that even in this setting, support recovery is possible and the computational complexity of HTP is established. Numerical study substantiates our theoretical results.", "bibtex": "@InProceedings{pmlr-v70-shen17a,\n title = \t {On the Iteration Complexity of Support Recovery via Hard Thresholding Pursuit},\n author = {Jie Shen and Ping Li},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3115--3124},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/shen17a/shen17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/shen17a.html},\n abstract = \t {Recovering the support of a sparse signal from its compressed samples has been one of the most important problems in high dimensional statistics. In this paper, we present a novel analysis for the hard thresholding pursuit (HTP) algorithm, showing that it exactly recovers the support of an arbitrary s-sparse signal within O(sklogk) iterations via a properly chosen proxy function, where k is the condition number of the problem. In stark contrast to the theoretical results in the literature, the iteration complexity we obtained holds without assuming the restricted isometry property, or relaxing the sparsity, or utilizing the optimality of the underlying signal. We further extend our result to a more challenging scenario, where the subproblem involved in HTP cannot be solved exactly. We prove that even in this setting, support recovery is possible and the computational complexity of HTP is established. Numerical study substantiates our theoretical results.}\n}", "pdf": "http://proceedings.mlr.press/v70/shen17a/shen17a.pdf", "supp": "", "pdf_size": 192536, "gs_citation": 37, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5152922067097992999&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Rutgers University, Piscataway, New Jersey, USA; Rutgers University, Piscataway, New Jersey, USA", "aff_domain": "rutgers.edu;stat.rutgers.edu", "email": "rutgers.edu;stat.rutgers.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/shen17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Rutgers University", "aff_unique_dep": "", "aff_unique_url": "https://www.rutgers.edu", "aff_unique_abbr": "Rutgers", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Piscataway", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "On the Sampling Problem for Kernel Quadrature", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/695", "id": "695", "author_site": "Francois-Xavier Briol, Chris J Oates, Jon Cockayne, Wilson Ye Chen, Mark Girolami", "author": "Fran\u00e7ois-Xavier Briol; Chris J. Oates; Jon Cockayne; Wilson Ye Chen; Mark Girolami", "abstract": "The standard Kernel Quadrature method for numerical integration with random point sets (also called Bayesian Monte Carlo) is known to converge in root mean square error at a rate determined by the ratio s/d, where s and d encode the smoothness and dimension of the integrand. However, an empirical investigation reveals that the rate constant C is highly sensitive to the distribution of the random points. 
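Illustrative sketch for the "Iteration Complexity of Support Recovery via Hard Thresholding Pursuit" entry above: the standard HTP loop (gradient step, keep the s largest entries, then debias by least squares on that support) on a synthetic sparse regression problem. Sizes and noise level are assumptions; this is the textbook algorithm, not the paper's proxy-function variant.

```python
import numpy as np

def htp(X, y, s, iters=30):
    """Hard thresholding pursuit: threshold a gradient step, then refit on the support."""
    n, d = X.shape
    w = np.zeros(d)
    for _ in range(iters):
        proxy = w + X.T @ (y - X @ w)                     # gradient step on 1/2 ||y - Xw||^2
        support = np.argsort(np.abs(proxy))[-s:]          # keep the s largest entries
        sol, *_ = np.linalg.lstsq(X[:, support], y, rcond=None)
        w = np.zeros(d)
        w[support] = sol                                  # least-squares debiasing on the support
    return w

rng = np.random.default_rng(0)
n, d, s = 200, 500, 10
X = rng.normal(size=(n, d)) / np.sqrt(n)                  # roughly unit-norm columns
w_star = np.zeros(d)
w_star[rng.choice(d, s, replace=False)] = rng.normal(size=s)
y = X @ w_star + 0.01 * rng.normal(size=n)

w_hat = htp(X, y, s)
print("relative error:", np.linalg.norm(w_hat - w_star) / np.linalg.norm(w_star))
```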
In contrast to standard Monte Carlo integration, for which optimal importance sampling is well-understood, the sampling distribution that minimises C for Kernel Quadrature does not admit a closed form. This paper argues that the practical choice of sampling distribution is an important open problem. One solution is considered; a novel automatic approach based on adaptive tempering and sequential Monte Carlo. Empirical results demonstrate a dramatic reduction in integration error of up to 4 orders of magnitude can be achieved with the proposed method.", "bibtex": "@InProceedings{pmlr-v70-briol17a,\n title = \t {On the Sampling Problem for Kernel Quadrature},\n author = {Fran{\\c{c}}ois-Xavier Briol and Chris J. Oates and Jon Cockayne and Wilson Ye Chen and Mark Girolami},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {586--595},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/briol17a/briol17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/briol17a.html},\n abstract = \t {The standard Kernel Quadrature method for numerical integration with random point sets (also called Bayesian Monte Carlo) is known to converge in root mean square error at a rate determined by the ratio s/d, where s and d encode the smoothness and dimension of the integrand. However, an empirical investigation reveals that the rate constant C is highly sensitive to the distribution of the random points. In contrast to standard Monte Carlo integration, for which optimal importance sampling is well-understood, the sampling distribution that minimises C for Kernel Quadrature does not admit a closed form. This paper argues that the practical choice of sampling distribution is an important open problem. One solution is considered; a novel automatic approach based on adaptive tempering and sequential Monte Carlo. 
Empirical results demonstrate a dramatic reduction in integration error of up to 4 orders of magnitude can be achieved with the proposed method.}\n}", "pdf": "http://proceedings.mlr.press/v70/briol17a/briol17a.pdf", "supp": "", "pdf_size": 622323, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14577328265390702928&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "University of Warwick, Department of Statistics + Imperial College London, Department of Mathematics; Newcastle University, School of Mathematics and Statistics + The Alan Turing Institute for Data Science; University of Warwick, Department of Statistics; University of Technology Sydney, School of Mathematical and Physical Sciences; Imperial College London, Department of Mathematics + The Alan Turing Institute for Data Science", "aff_domain": "warwick.ac.uk; ; ; ; ", "email": "warwick.ac.uk; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/briol17a.html", "aff_unique_index": "0+1;2+3;0;4;1+3", "aff_unique_norm": "University of Warwick;Imperial College London;Newcastle University;Alan Turing Institute;University of Technology Sydney", "aff_unique_dep": "Department of Statistics;Department of Mathematics;School of Mathematics and Statistics;Data Science;School of Mathematical and Physical Sciences", "aff_unique_url": "https://www.warwick.ac.uk;https://www.imperial.ac.uk;https://www.ncl.ac.uk;https://turing.ac.uk;https://www.uts.edu.au", "aff_unique_abbr": "Warwick;ICL;NU;ATI;UTS", "aff_campus_unique_index": "1;;2;1", "aff_campus_unique": ";London;Sydney", "aff_country_unique_index": "0+0;0+0;0;1;0+0", "aff_country_unique": "United Kingdom;Australia" }, { "title": "Online Learning to Rank in Stochastic Click Models", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/645", "id": "645", "author_site": "Masrour Zoghi, Tomas Tunys, Mohammad Ghavamzadeh, Branislav Kveton, Csaba Szepesvari, Zheng Wen", "author": "Masrour Zoghi; Tomas Tunys; Mohammad Ghavamzadeh; Branislav Kveton; Csaba Szepesvari; Zheng Wen", "abstract": "Online learning to rank is a core problem in information retrieval and machine learning. Many provably efficient algorithms have been recently proposed for this problem in specific click models. The click model is a model of how the user interacts with a list of documents. Though these results are significant, their impact on practice is limited, because all proposed algorithms are designed for specific click models and lack convergence guarantees in other models. In this work, we propose BatchRank, the first online learning to rank algorithm for a broad class of click models. The class encompasses two most fundamental click models, the cascade and position-based models. We derive a gap-dependent upper bound on the T-step regret of BatchRank and evaluate it on a range of web search queries. 
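Illustrative sketch for the "On the Sampling Problem for Kernel Quadrature" entry above: kernel quadrature (Bayesian Monte Carlo) for integrating a function against Uniform[0, 1] with an RBF kernel, where the weights are w = K^{-1} z with z_i the kernel mean embedding, available in closed form for this kernel/measure pair. The sampling distribution here is plain i.i.d. Monte Carlo; the paper's point is precisely that this design choice matters. Lengthscale, jitter, and the integrand are assumptions.

```python
import numpy as np
from scipy.special import erf

ell = 0.25                                                # RBF lengthscale (assumed)

def k(a, b):
    return np.exp(-0.5 * (a[:, None] - b[None, :]) ** 2 / ell ** 2)

def kernel_mean(x):
    """Closed form of int_0^1 exp(-(t - x)^2 / (2 ell^2)) dt."""
    c = np.sqrt(np.pi / 2) * ell
    return c * (erf((1 - x) / (np.sqrt(2) * ell)) - erf((0 - x) / (np.sqrt(2) * ell)))

rng = np.random.default_rng(0)
f = lambda x: np.sin(3 * x) + x ** 2                      # integrand
true_value = (1 - np.cos(3)) / 3 + 1 / 3                  # exact integral over [0, 1]

x = rng.uniform(size=50)                                  # i.i.d. design points
K = k(x, x) + 1e-8 * np.eye(len(x))                       # small jitter for stability
w = np.linalg.solve(K, kernel_mean(x))                    # kernel quadrature weights
print("kernel quadrature:", w @ f(x), "| plain MC:", f(x).mean(), "| truth:", true_value)
```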
We observe that BatchRank outperforms ranked bandits and is more robust than CascadeKL-UCB, an existing algorithm for the cascade model.", "bibtex": "@InProceedings{pmlr-v70-zoghi17a,\n title = \t {Online Learning to Rank in Stochastic Click Models},\n author = {Masrour Zoghi and Tomas Tunys and Mohammad Ghavamzadeh and Branislav Kveton and Csaba Szepesvari and Zheng Wen},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {4199--4208},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zoghi17a/zoghi17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/zoghi17a.html},\n abstract = \t {Online learning to rank is a core problem in information retrieval and machine learning. Many provably efficient algorithms have been recently proposed for this problem in specific click models. The click model is a model of how the user interacts with a list of documents. Though these results are significant, their impact on practice is limited, because all proposed algorithms are designed for specific click models and lack convergence guarantees in other models. In this work, we propose BatchRank, the first online learning to rank algorithm for a broad class of click models. The class encompasses two most fundamental click models, the cascade and position-based models. We derive a gap-dependent upper bound on the T-step regret of BatchRank and evaluate it on a range of web search queries. We observe that BatchRank outperforms ranked bandits and is more robust than CascadeKL-UCB, an existing algorithm for the cascade model.}\n}", "pdf": "http://proceedings.mlr.press/v70/zoghi17a/zoghi17a.pdf", "supp": "", "pdf_size": 592941, "gs_citation": 123, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=808127481622260281&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Independent Researcher, Vancouver, BC, Canada+Adobe Research; Czech Technical University, Prague, Czech Republic; DeepMind, Mountain View, CA, USA+Adobe Research; Adobe Research, San Jose, CA, USA; University of Alberta, Edmonton, AB, Canada; Adobe Research, San Jose, CA, USA", "aff_domain": "zoghi.org; ; ;adobe.com; ; ", "email": "zoghi.org; ; ;adobe.com; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/zoghi17a.html", "aff_unique_index": "0+1;2;3+1;1;4;1", "aff_unique_norm": "Independent Researcher;Adobe;Czech Technical University;DeepMind;University of Alberta", "aff_unique_dep": ";Adobe Research;;;", "aff_unique_url": ";https://research.adobe.com;https://www.ctu.cz;https://deepmind.com;https://www.ualberta.ca", "aff_unique_abbr": ";Adobe;CTU;DeepMind;UAlberta", "aff_campus_unique_index": ";1;2;3;4;3", "aff_campus_unique": ";Prague;Mountain View;San Jose;Edmonton", "aff_country_unique_index": "0+1;2;1+1;1;0;1", "aff_country_unique": "Canada;United States;Czech Republic" }, { "title": "Online Learning with Local Permutations and Delayed Feedback", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/533", "id": "533", "author_site": "Liran Szlak, Ohad Shamir", "author": "Ohad Shamir; Liran Szlak", "abstract": "We propose an Online Learning with Local Permutations (OLLP) setting, in which the learner is allowed to slightly permute the", "bibtex": "@InProceedings{pmlr-v70-shamir17a,\n title = \t {Online Learning with 
Local Permutations and Delayed Feedback},\n author = {Ohad Shamir and Liran Szlak},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3086--3094},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/shamir17a/shamir17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/shamir17a.html},\n abstract = \t {We propose an Online Learning with Local Permutations (OLLP) setting, in which the learner is allowed to slightly permute the", "pdf": "http://proceedings.mlr.press/v70/shamir17a/shamir17a.pdf", "supp": "", "pdf_size": 795415, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5743386269076588892&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Weizmann Institute of Science; Weizmann Institute of Science", "aff_domain": "weizmann.ac.il;weizmann.ac.il", "email": "weizmann.ac.il;weizmann.ac.il", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/shamir17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Weizmann Institute of Science", "aff_unique_dep": "", "aff_unique_url": "https://www.weizmann.org.il", "aff_unique_abbr": "Weizmann", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "title": "Online Partial Least Square Optimization: Dropping Convexity for Better Efficiency and Scalability", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/791", "id": "791", "author_site": "Zhehui Chen, Lin Yang, Chris Junchi Li, Tuo Zhao", "author": "Zhehui Chen; Lin F. Yang; Chris Junchi Li; Tuo Zhao", "abstract": "Multiview representation learning is popular for latent factor analysis. Many existing approaches formulate the multiview representation learning as convex optimization problems, where global optima can be obtained by certain algorithms in polynomial time. However, many evidences have corroborated that heuristic nonconvex approaches also have good empirical computational performance and convergence to the global optima, although there is a lack of theoretical justification. Such a gap between theory and practice motivates us to study a nonconvex formulation for multiview representation learning, which can be efficiently solved by a simple stochastic gradient descent method. By analyzing the dynamics of the algorithm based on diffusion processes, we establish a global rate of convergence to the global optima. Numerical experiments are provided to support our theory.", "bibtex": "@InProceedings{pmlr-v70-chen17h,\n title = \t {Online Partial Least Square Optimization: Dropping Convexity for Better Efficiency and Scalability},\n author = {Zhehui Chen and Lin F. Yang and Chris Junchi Li and Tuo Zhao},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {777--786},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/chen17h/chen17h.pdf},\n url = \t {https://proceedings.mlr.press/v70/chen17h.html},\n abstract = \t {Multiview representation learning is popular for latent factor analysis. 
Many existing approaches formulate the multiview representation learning as convex optimization problems, where global optima can be obtained by certain algorithms in polynomial time. However, many evidences have corroborated that heuristic nonconvex approaches also have good empirical computational performance and convergence to the global optima, although there is a lack of theoretical justification. Such a gap between theory and practice motivates us to study a nonconvex formulation for multiview representation learning, which can be efficiently solved by a simple stochastic gradient descent method. By analyzing the dynamics of the algorithm based on diffusion processes, we establish a global rate of convergence to the global optima. Numerical experiments are provided to support our theory.}\n}", "pdf": "http://proceedings.mlr.press/v70/chen17h/chen17h.pdf", "supp": "", "pdf_size": 351096, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14758014731924148586&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Georgia Institute of Technology; Johns Hopkins University; Princeton University; Georgia Institute of Technology", "aff_domain": "gatech.edu;jhu.edu;princeton.edu;gatech.edu", "email": "gatech.edu;jhu.edu;princeton.edu;gatech.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/chen17h.html", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "Georgia Institute of Technology;Johns Hopkins University;Princeton University", "aff_unique_dep": ";;", "aff_unique_url": "https://www.gatech.edu;https://www.jhu.edu;https://www.princeton.edu", "aff_unique_abbr": "Georgia Tech;JHU;Princeton", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Online and Linear-Time Attention by Enforcing Monotonic Alignments", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/866", "id": "866", "author_site": "Colin Raffel, Thang Luong, Peter Liu, Ron Weiss, Douglas Eck", "author": "Colin Raffel; Minh-Thang Luong; Peter J. Liu; Ron J. Weiss; Douglas Eck", "abstract": "Recurrent neural network models with an attention mechanism have proven to be extremely effective on a wide variety of sequence-to-sequence problems. However, the fact that soft attention mechanisms perform a pass over the entire input sequence when producing each element in the output sequence precludes their use in online settings and results in a quadratic time complexity. Based on the insight that the alignment between input and output sequence elements is monotonic in many problems of interest, we propose an end-to-end differentiable method for learning monotonic alignments which, at test time, enables computing attention online and in linear time. We validate our approach on sentence summarization, machine translation, and online speech recognition problems and achieve results competitive with existing sequence-to-sequence models.", "bibtex": "@InProceedings{pmlr-v70-raffel17a,\n title = \t {Online and Linear-Time Attention by Enforcing Monotonic Alignments},\n author = {Colin Raffel and Minh-Thang Luong and Peter J. Liu and Ron J. 
Weiss and Douglas Eck},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2837--2846},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/raffel17a/raffel17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/raffel17a.html},\n abstract = \t {Recurrent neural network models with an attention mechanism have proven to be extremely effective on a wide variety of sequence-to-sequence problems. However, the fact that soft attention mechanisms perform a pass over the entire input sequence when producing each element in the output sequence precludes their use in online settings and results in a quadratic time complexity. Based on the insight that the alignment between input and output sequence elements is monotonic in many problems of interest, we propose an end-to-end differentiable method for learning monotonic alignments which, at test time, enables computing attention online and in linear time. We validate our approach on sentence summarization, machine translation, and online speech recognition problems and achieve results competitive with existing sequence-to-sequence models.}\n}", "pdf": "http://proceedings.mlr.press/v70/raffel17a/raffel17a.pdf", "supp": "", "pdf_size": 567938, "gs_citation": 331, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12685327783862697690&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Google Brain, Mountain View, California, USA; Google Brain, Mountain View, California, USA; Google Brain, Mountain View, California, USA; Google Brain, Mountain View, California, USA; Google Brain, Mountain View, California, USA", "aff_domain": "gmail.com; ; ; ; ", "email": "gmail.com; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/raffel17a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google Brain", "aff_unique_url": "https://brain.google.com", "aff_unique_abbr": "Google Brain", "aff_campus_unique_index": "0;0;0;0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "OptNet: Differentiable Optimization as a Layer in Neural Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/844", "id": "844", "author_site": "Brandon Amos, Zico Kolter", "author": "Brandon Amos; J. Zico Kolter", "abstract": "This paper presents OptNet, a network architecture that integrates optimization problems (here, specifically in the form of quadratic programs) as individual layers in larger end-to-end trainable deep networks. These layers encode constraints and complex dependencies between the hidden states that traditional convolutional and fully-connected layers often cannot capture. 
In this paper, we explore the foundations for such an architecture: we show how techniques from sensitivity analysis, bilevel optimization, and implicit differentiation can be used to exactly differentiate through these layers and with respect to layer parameters; we develop a highly efficient solver for these layers that exploits fast GPU-based batch solves within a primal-dual interior point method, and which provides backpropagation gradients with virtually no additional cost on top of the solve; and we highlight the application of these approaches in several problems. In one notable example, we show that the method is capable of learning to play mini-Sudoku (4x4) given just input and output games, with no a priori information about the rules of the game; this highlights the ability of our architecture to learn hard constraints better than other neural architectures.", "bibtex": "@InProceedings{pmlr-v70-amos17a,\n title = \t {{O}pt{N}et: Differentiable Optimization as a Layer in Neural Networks},\n author = {Brandon Amos and J. Zico Kolter},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {136--145},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/amos17a/amos17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/amos17a.html},\n abstract = \t {This paper presents OptNet, a network architecture that integrates optimization problems (here, specifically in the form of quadratic programs) as individual layers in larger end-to-end trainable deep networks. These layers encode constraints and complex dependencies between the hidden states that traditional convolutional and fully-connected layers often cannot capture. In this paper, we explore the foundations for such an architecture: we show how techniques from sensitivity analysis, bilevel optimization, and implicit differentiation can be used to exactly differentiate through these layers and with respect to layer parameters; we develop a highly efficient solver for these layers that exploits fast GPU-based batch solves within a primal-dual interior point method, and which provides backpropagation gradients with virtually no additional cost on top of the solve; and we highlight the application of these approaches in several problems. 
In one notable example, we show that the method is capable of learning to play mini-Sudoku (4x4) given just input and output games, with no a priori information about the rules of the game; this highlights the ability of our architecture to learn hard constraints better than other neural architectures.}\n}", "pdf": "http://proceedings.mlr.press/v70/amos17a/amos17a.pdf", "supp": "", "pdf_size": 615126, "gs_citation": 1228, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9740292310844529830&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "School of Computer Science, Carnegie Mellon University; School of Computer Science, Carnegie Mellon University", "aff_domain": "cs.cmu.edu;cs.cmu.edu", "email": "cs.cmu.edu;cs.cmu.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/amos17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "School of Computer Science", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Optimal Algorithms for Smooth and Strongly Convex Distributed Optimization in Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/685", "id": "685", "author_site": "Kevin Scaman, Francis Bach, Sebastien Bubeck, Yin Tat Lee, Laurent Massouli\u00e9", "author": "Kevin Scaman; Francis Bach; S\u00e9bastien Bubeck; Yin Tat Lee; Laurent Massouli\u00e9", "abstract": "In this paper, we determine the optimal convergence rates for strongly convex and smooth distributed optimization in two settings: centralized and decentralized communications over a network. For centralized (i.e. master/slave) algorithms, we show that distributing Nesterov\u2019s accelerated gradient descent is optimal and achieves a precision $\\varepsilon > 0$ in time $O(\\sqrt{\\kappa_g}(1+\\Delta\\tau)\\ln(1/\\varepsilon))$, where $\\kappa_g$ is the condition number of the (global) function to optimize, $\\Delta$ is the diameter of the network, and $\\tau$ (resp. $1$) is the time needed to communicate values between two neighbors (resp. perform local computations). For decentralized algorithms based on gossip, we provide the first optimal algorithm, called the multi-step dual accelerated (MSDA) method, that achieves a precision $\\varepsilon > 0$ in time $O(\\sqrt{\\kappa_l}(1+\\frac{\\tau}{\\sqrt{\\gamma}})\\ln(1/\\varepsilon))$, where $\\kappa_l$ is the condition number of the local functions and $\\gamma$ is the (normalized) eigengap of the gossip matrix used for communication between nodes. 
We then verify the efficiency of MSDA against state-of-the-art methods for two problems: least-squares regression and classification by logistic regression.", "bibtex": "@InProceedings{pmlr-v70-scaman17a,\n title = \t {Optimal Algorithms for Smooth and Strongly Convex Distributed Optimization in Networks},\n author = {Kevin Scaman and Francis Bach and S{\\'e}bastien Bubeck and Yin Tat Lee and Laurent Massouli{\\'e}},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3027--3036},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/scaman17a/scaman17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/scaman17a.html},\n abstract = \t {In this paper, we determine the optimal convergence rates for strongly convex and smooth distributed optimization in two settings: centralized and decentralized communications over a network. For centralized (i.e. master/slave) algorithms, we show that distributing Nesterov\u2019s accelerated gradient descent is optimal and achieves a precision $\\varepsilon > 0$ in time $O(\\sqrt{\\kappa_g}(1+\\Delta\\tau)\\ln(1/\\varepsilon))$, where $\\kappa_g$ is the condition number of the (global) function to optimize, $\\Delta$ is the diameter of the network, and $\\tau$ (resp. $1$) is the time needed to communicate values between two neighbors (resp. perform local computations). For decentralized algorithms based on gossip, we provide the first optimal algorithm, called the multi-step dual accelerated (MSDA) method, that achieves a precision $\\varepsilon > 0$ in time $O(\\sqrt{\\kappa_l}(1+\\frac{\\tau}{\\sqrt{\\gamma}})\\ln(1/\\varepsilon))$, where $\\kappa_l$ is the condition number of the local functions and $\\gamma$ is the (normalized) eigengap of the gossip matrix used for communication between nodes. 
We then verify the efficiency of MSDA against state-of-the-art methods for two problems: least-squares regression and classification by logistic regression.}\n}", "pdf": "http://proceedings.mlr.press/v70/scaman17a/scaman17a.pdf", "supp": "", "pdf_size": 437198, "gs_citation": 389, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3819446429322645042&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "MSR-INRIA Joint Center, Palaiseau, France; INRIA, Ecole Normale Sup\u00e9rieure, Paris, France; Theory group, Microsoft Research, Redmond, United States; Theory group, Microsoft Research, Redmond, United States; MSR-INRIA Joint Center, Palaiseau, France", "aff_domain": "gmail.com; ; ; ; ", "email": "gmail.com; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/scaman17a.html", "aff_unique_index": "0;1;2;2;0", "aff_unique_norm": "MSR-INRIA Joint Center;INRIA;Microsoft", "aff_unique_dep": ";;Theory group", "aff_unique_url": "https://www.msr-inria.fr;https://www.inria.fr;https://www.microsoft.com/en-us/research", "aff_unique_abbr": ";INRIA;MSR", "aff_campus_unique_index": "0;1;2;2;0", "aff_campus_unique": "Palaiseau;Paris;Redmond", "aff_country_unique_index": "0;0;1;1;0", "aff_country_unique": "France;United States" }, { "title": "Optimal Densification for Fast and Accurate Minwise Hashing", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/627", "id": "627", "author": "Anshumali Shrivastava", "abstract": "Minwise hashing is a fundamental and one of the most successful hashing algorithm in the literature. Recent advances based on the idea of densification (Shrivastava \\& Li, 2014) have shown that it is possible to compute $k$ minwise hashes, of a vector with $d$ nonzeros, in mere $(d + k)$ computations, a significant improvement over the classical $O(dk)$. These advances have led to an algorithmic improvement in the query complexity of traditional indexing algorithms based on minwise hashing. Unfortunately, the variance of the current densification techniques is unnecessarily high, which leads to significantly poor accuracy compared to vanilla minwise hashing, especially when the data is sparse. In this paper, we provide a novel densification scheme which relies on carefully tailored 2-universal hashes. We show that the proposed scheme is variance-optimal, and without losing the runtime efficiency, it is significantly more accurate than existing densification techniques. As a result, we obtain a significantly efficient hashing scheme which has the same variance and collision probability as minwise hashing. Experimental evaluations on real sparse and high-dimensional datasets validate our claims. 
We believe that given the significant advantages, our method will replace minwise hashing implementations in practice.", "bibtex": "@InProceedings{pmlr-v70-shrivastava17a,\n title = \t {Optimal Densification for Fast and Accurate Minwise Hashing},\n author = {Anshumali Shrivastava},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3154--3163},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/shrivastava17a/shrivastava17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/shrivastava17a.html},\n abstract = \t {Minwise hashing is a fundamental and one of the most successful hashing algorithm in the literature. Recent advances based on the idea of densification (Shrivastava \\& Li, 2014) have shown that it is possible to compute $k$ minwise hashes, of a vector with $d$ nonzeros, in mere $(d + k)$ computations, a significant improvement over the classical $O(dk)$. These advances have led to an algorithmic improvement in the query complexity of traditional indexing algorithms based on minwise hashing. Unfortunately, the variance of the current densification techniques is unnecessarily high, which leads to significantly poor accuracy compared to vanilla minwise hashing, especially when the data is sparse. In this paper, we provide a novel densification scheme which relies on carefully tailored 2-universal hashes. We show that the proposed scheme is variance-optimal, and without losing the runtime efficiency, it is significantly more accurate than existing densification techniques. As a result, we obtain a significantly efficient hashing scheme which has the same variance and collision probability as minwise hashing. Experimental evaluations on real sparse and high-dimensional datasets validate our claims. We believe that given the significant advantages, our method will replace minwise hashing implementations in practice.}\n}", "pdf": "http://proceedings.mlr.press/v70/shrivastava17a/shrivastava17a.pdf", "supp": "", "pdf_size": 1070012, "gs_citation": 79, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15586703778517319851&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Rice University, Houston, TX, USA", "aff_domain": "rice.edu", "email": "rice.edu", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v70/shrivastava17a.html", "aff_unique_index": "0", "aff_unique_norm": "Rice University", "aff_unique_dep": "", "aff_unique_url": "https://www.rice.edu", "aff_unique_abbr": "Rice", "aff_campus_unique_index": "0", "aff_campus_unique": "Houston", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "title": "Optimal and Adaptive Off-policy Evaluation in Contextual Bandits", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/618", "id": "618", "author_site": "Yu-Xiang Wang, Alekh Agarwal, Miroslav Dudik", "author": "Yu-Xiang Wang; Alekh Agarwal; Miroslav Dud\u0131\u0301k", "abstract": "We study the off-policy evaluation problem\u2014estimating the value of a target policy using data collected by another policy\u2014under the contextual bandit model. We consider the general (agnostic) setting without access to a consistent model of rewards and establish a minimax lower bound on the mean squared error (MSE). 
The bound is matched up to constants by the inverse propensity scoring (IPS) and doubly robust (DR) estimators. This highlights the difficulty of the agnostic contextual setting, in contrast with multi-armed bandits and contextual bandits with access to a consistent reward model, where IPS is suboptimal. We then propose the SWITCH estimator, which can use an existing reward model (not necessarily consistent) to achieve a better bias-variance tradeoff than IPS and DR. We prove an upper bound on its MSE and demonstrate its benefits empirically on a diverse collection of datasets, often outperforming prior work by orders of magnitude.", "bibtex": "@InProceedings{pmlr-v70-wang17a,\n title = \t {Optimal and Adaptive Off-policy Evaluation in Contextual Bandits},\n author = {Yu-Xiang Wang and Alekh Agarwal and Miroslav Dud\\'{\\i}k},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3589--3597},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/wang17a/wang17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/wang17a.html},\n abstract = \t {We study the off-policy evaluation problem\u2014estimating the value of a target policy using data collected by another policy\u2014under the contextual bandit model. We consider the general (agnostic) setting without access to a consistent model of rewards and establish a minimax lower bound on the mean squared error (MSE). The bound is matched up to constants by the inverse propensity scoring (IPS) and doubly robust (DR) estimators. This highlights the difficulty of the agnostic contextual setting, in contrast with multi-armed bandits and contextual bandits with access to a consistent reward model, where IPS is suboptimal. We then propose the SWITCH estimator, which can use an existing reward model (not necessarily consistent) to achieve a better bias-variance tradeoff than IPS and DR. 
We prove an upper bound on its MSE and demonstrate its benefits empirically on a diverse collection of datasets, often outperforming prior work by orders of magnitude.}\n}", "pdf": "http://proceedings.mlr.press/v70/wang17a/wang17a.pdf", "supp": "", "pdf_size": 399770, "gs_citation": 254, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6643537538376903584&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Carnegie Mellon University; Microsoft Research; Microsoft Research", "aff_domain": "cs.cmu.edu;microsoft.com;microsoft.com", "email": "cs.cmu.edu;microsoft.com;microsoft.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/wang17a.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "Carnegie Mellon University;Microsoft", "aff_unique_dep": ";Microsoft Research", "aff_unique_url": "https://www.cmu.edu;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "CMU;MSR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Oracle Complexity of Second-Order Methods for Finite-Sum Problems", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/482", "id": "482", "author_site": "Yossi Arjevani, Ohad Shamir", "author": "Yossi Arjevani; Ohad Shamir", "abstract": "Finite-sum optimization problems are ubiquitous in machine learning, and are commonly solved using first-order methods which rely on gradient computations. Recently, there has been growing interest in", "bibtex": "@InProceedings{pmlr-v70-arjevani17a,\n title = \t {Oracle Complexity of Second-Order Methods for Finite-Sum Problems},\n author = {Yossi Arjevani and Ohad Shamir},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {205--213},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/arjevani17a/arjevani17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/arjevani17a.html},\n abstract = \t {Finite-sum optimization problems are ubiquitous in machine learning, and are commonly solved using first-order methods which rely on gradient computations. 
Recently, there has been growing interest in", "pdf": "http://proceedings.mlr.press/v70/arjevani17a/arjevani17a.pdf", "supp": "", "pdf_size": 354999, "gs_citation": 35, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4794729605504024432&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science and Applied Mathematics, Weizmann Institute of Science, Rehovot, Israel; Department of Computer Science and Applied Mathematics, Weizmann Institute of Science, Rehovot, Israel", "aff_domain": "weizmann.ac.il;weizmann.ac.il", "email": "weizmann.ac.il;weizmann.ac.il", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/arjevani17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Weizmann Institute of Science", "aff_unique_dep": "Department of Computer Science and Applied Mathematics", "aff_unique_url": "https://www.weizmann.ac.il", "aff_unique_abbr": "Weizmann", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Rehovot", "aff_country_unique_index": "0;0", "aff_country_unique": "Israel" }, { "title": "Ordinal Graphical Models: A Tale of Two Approaches", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/644", "id": "644", "author_site": "ARUN SAI SUGGALA, Eunho Yang, Pradeep Ravikumar", "author": "Arun Sai Suggala; Eunho Yang; Pradeep Ravikumar", "abstract": "Undirected graphical models or Markov random fields (MRFs) are widely used for modeling multivariate probability distributions. Much of the work on MRFs has focused on continuous variables, and nominal variables (that is, unordered categorical variables). However, data from many real world applications involve ordered categorical variables also known as ordinal variables, e.g., movie ratings on Netflix which can be ordered from 1 to 5 stars. With respect to univariate ordinal distributions, as we detail in the paper, there are two main categories of distributions; while there have been efforts to extend these to multivariate ordinal distributions, the resulting distributions are typically very complex, with either a large number of parameters, or with non-convex likelihoods. While there have been some work on tractable approximations, these do not come with strong statistical guarantees, and moreover are relatively computationally expensive. In this paper, we theoretically investigate two classes of graphical models for ordinal data, corresponding to the two main categories of univariate ordinal distributions. In contrast to previous work, our theoretical developments allow us to provide correspondingly two classes of estimators that are not only computationally efficient but also have strong statistical guarantees.", "bibtex": "@InProceedings{pmlr-v70-suggala17a,\n title = \t {Ordinal Graphical Models: A Tale of Two Approaches},\n author = {Arun Sai Suggala and Eunho Yang and Pradeep Ravikumar},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3260--3269},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/suggala17a/suggala17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/suggala17a.html},\n abstract = \t {Undirected graphical models or Markov random fields (MRFs) are widely used for modeling multivariate probability distributions. 
Much of the work on MRFs has focused on continuous variables, and nominal variables (that is, unordered categorical variables). However, data from many real world applications involve ordered categorical variables also known as ordinal variables, e.g., movie ratings on Netflix which can be ordered from 1 to 5 stars. With respect to univariate ordinal distributions, as we detail in the paper, there are two main categories of distributions; while there have been efforts to extend these to multivariate ordinal distributions, the resulting distributions are typically very complex, with either a large number of parameters, or with non-convex likelihoods. While there have been some work on tractable approximations, these do not come with strong statistical guarantees, and moreover are relatively computationally expensive. In this paper, we theoretically investigate two classes of graphical models for ordinal data, corresponding to the two main categories of univariate ordinal distributions. In contrast to previous work, our theoretical developments allow us to provide correspondingly two classes of estimators that are not only computationally efficient but also have strong statistical guarantees.}\n}", "pdf": "http://proceedings.mlr.press/v70/suggala17a/suggala17a.pdf", "supp": "", "pdf_size": 733545, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4955360029760182400&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Carnegie Mellon University, Pittsburgh, USA+AITrics, Seoul, South Korea; School of Computing, KAIST, Daejeon, South Korea+AITrics, Seoul, South Korea; Carnegie Mellon University, Pittsburgh, USA", "aff_domain": "andrew.cmu.edu;kaist.ac.kr; ", "email": "andrew.cmu.edu;kaist.ac.kr; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/suggala17a.html", "aff_unique_index": "0+1;2+1;0", "aff_unique_norm": "Carnegie Mellon University;AITRICS;KAIST", "aff_unique_dep": ";;School of Computing", "aff_unique_url": "https://www.cmu.edu;;https://www.kaist.ac.kr", "aff_unique_abbr": "CMU;;KAIST", "aff_campus_unique_index": "0+1;2+1;0", "aff_campus_unique": "Pittsburgh;Seoul;Daejeon", "aff_country_unique_index": "0+1;1+1;0", "aff_country_unique": "United States;South Korea" }, { "title": "Orthogonalized ALS: A Theoretically Principled Tensor Decomposition Algorithm for Practical Use", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/500", "id": "500", "author_site": "Vatsal Sharan, Gregory Valiant", "author": "Vatsal Sharan; Gregory Valiant", "abstract": "The popular Alternating Least Squares (ALS) algorithm for tensor decomposition is efficient and easy to implement, but often converges to poor local optima\u2014particularly when the weights of the factors are non-uniform. We propose a modification of the ALS approach that is as efficient as standard ALS, but provably recovers the true factors with random initialization under standard incoherence assumptions on the factors of the tensor. 
We demonstrate the significant practical superiority of our approach over traditional ALS for a variety of tasks on synthetic data\u2014including tensor factorization on exact, noisy and over-complete tensors, as well as tensor completion\u2014and for computing word embeddings from a third-order word tri-occurrence tensor.", "bibtex": "@InProceedings{pmlr-v70-sharan17a,\n title = \t {Orthogonalized {ALS}: A Theoretically Principled Tensor Decomposition Algorithm for Practical Use},\n author = {Vatsal Sharan and Gregory Valiant},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3095--3104},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/sharan17a/sharan17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/sharan17a.html},\n abstract = \t {The popular Alternating Least Squares (ALS) algorithm for tensor decomposition is efficient and easy to implement, but often converges to poor local optima\u2014particularly when the weights of the factors are non-uniform. We propose a modification of the ALS approach that is as efficient as standard ALS, but provably recovers the true factors with random initialization under standard incoherence assumptions on the factors of the tensor. We demonstrate the significant practical superiority of our approach over traditional ALS for a variety of tasks on synthetic data\u2014including tensor factorization on exact, noisy and over-complete tensors, as well as tensor completion\u2014and for computing word embeddings from a third-order word tri-occurrence tensor.}\n}", "pdf": "http://proceedings.mlr.press/v70/sharan17a/sharan17a.pdf", "supp": "", "pdf_size": 649022, "gs_citation": 57, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3601623296996697170&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Stanford University; Stanford University", "aff_domain": "stanford.edu; ", "email": "stanford.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/sharan17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Pain-Free Random Differential Privacy with Sensitivity Sampling", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/727", "id": "727", "author_site": "Benjamin Rubinstein, Francesco Ald\u00e0", "author": "Benjamin I. P. Rubinstein; Francesco Ald\u00e0", "abstract": "Popular approaches to differential privacy, such as the Laplace and exponential mechanisms, calibrate randomised smoothing through global sensitivity of the target non-private function. Bounding such sensitivity is often a prohibitively complex analytic calculation. As an alternative, we propose a straightforward sampler for estimating sensitivity of non-private mechanisms. Since our sensitivity estimates hold with high probability, any mechanism that would be $(\\epsilon,\\delta)$-differentially private under bounded global sensitivity automatically achieves $(\\epsilon,\\delta,\\gamma)$-random differential privacy (Hall et al. 
2012), without any target-specific calculations required. We demonstrate on worked example learners how our usable approach adopts a naturally-relaxed privacy guarantee, while achieving more accurate releases even for non-private functions that are black-box computer programs.", "bibtex": "@InProceedings{pmlr-v70-rubinstein17a,\n title = \t {Pain-Free Random Differential Privacy with Sensitivity Sampling},\n author = {Benjamin I. P. Rubinstein and Francesco Ald{\\`a}},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2950--2959},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/rubinstein17a/rubinstein17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/rubinstein17a.html},\n abstract = \t {Popular approaches to differential privacy, such as the Laplace and exponential mechanisms, calibrate randomised smoothing through global sensitivity of the target non-private function. Bounding such sensitivity is often a prohibitively complex analytic calculation. As an alternative, we propose a straightforward sampler for estimating sensitivity of non-private mechanisms. Since our sensitivity estimates hold with high probability, any mechanism that would be $(\\epsilon,\\delta)$-differentially private under bounded global sensitivity automatically achieves $(\\epsilon,\\delta,\\gamma)$-random differential privacy (Hall et al. 2012), without any target-specific calculations required. We demonstrate on worked example learners how our usable approach adopts a naturally-relaxed privacy guarantee, while achieving more accurate releases even for non-private functions that are black-box computer programs.}\n}", "pdf": "http://proceedings.mlr.press/v70/rubinstein17a/rubinstein17a.pdf", "supp": "", "pdf_size": 458497, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18193309706967817285&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "School of Computing and Information Systems, University of Melbourne, Australia; Horst G\u00f6rtz Institute for IT Security and Faculty of Mathematics, Ruhr-Universit\u00e4t Bochum, Germany", "aff_domain": "unimelb.edu.au;rub.de", "email": "unimelb.edu.au;rub.de", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/rubinstein17a.html", "aff_unique_index": "0;1", "aff_unique_norm": "University of Melbourne;Ruhr-Universit\u00e4t Bochum", "aff_unique_dep": "School of Computing and Information Systems;Horst G\u00f6rtz Institute for IT Security and Faculty of Mathematics", "aff_unique_url": "https://www.unimelb.edu.au;https://www.ruhr-uni-bochum.de", "aff_unique_abbr": "UniMelb;", "aff_campus_unique_index": "0", "aff_campus_unique": "Melbourne;", "aff_country_unique_index": "0;1", "aff_country_unique": "Australia;Germany" }, { "title": "Parallel Multiscale Autoregressive Density Estimation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/643", "id": "643", "author_site": "Scott Reed, A\u00e4ron van den Oord, Nal Kalchbrenner, Sergio G\u00f3mez Colmenarejo, Ziyu Wang, Yutian Chen, Dan Belov, Nando de Freitas", "author": "Scott Reed; A\u00e4ron Oord; Nal Kalchbrenner; Sergio G\u00f3mez Colmenarejo; Ziyu Wang; Yutian Chen; Dan Belov; Nando Freitas", "abstract": "PixelCNN achieves state-of-the-art results in 
density estimation for natural images. Although training is fast, inference is costly, requiring one network evaluation per pixel; O(N) for N pixels. This can be sped up by caching activations, but still involves generating each pixel sequentially. In this work, we propose a parallelized PixelCNN that allows more efficient inference by modeling certain pixel groups as conditionally independent. Our new PixelCNN model achieves competitive density estimation and orders of magnitude speedup \u2013 O(log N) sampling instead of O(N) \u2013 enabling the practical generation of 512x512 images. We evaluate the model on class-conditional image generation, text-to-image synthesis, and action-conditional video generation, showing that our model achieves the best results among non-pixel-autoregressive density models that allow efficient sampling.", "bibtex": "@InProceedings{pmlr-v70-reed17a,\n title = \t {Parallel Multiscale Autoregressive Density Estimation},\n author = {Scott Reed and A{\\\"a}ron van den Oord and Nal Kalchbrenner and Sergio G{\\'o}mez Colmenarejo and Ziyu Wang and Yutian Chen and Dan Belov and Nando de Freitas},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2912--2921},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/reed17a/reed17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/reed17a.html},\n abstract = \t {PixelCNN achieves state-of-the-art results in density estimation for natural images. Although training is fast, inference is costly, requiring one network evaluation per pixel; O(N) for N pixels. This can be sped up by caching activations, but still involves generating each pixel sequentially. In this work, we propose a parallelized PixelCNN that allows more efficient inference by modeling certain pixel groups as conditionally independent. Our new PixelCNN model achieves competitive density estimation and orders of magnitude speedup \u2013 O(log N) sampling instead of O(N) \u2013 enabling the practical generation of 512x512 images. 
We evaluate the model on class-conditional image generation, text-to-image synthesis, and action-conditional video generation, showing that our model achieves the best results among non-pixel-autoregressive density models that allow efficient sampling.}\n}", "pdf": "http://proceedings.mlr.press/v70/reed17a/reed17a.pdf", "supp": "", "pdf_size": 3560385, "gs_citation": 261, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15765918242189260465&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind", "aff_domain": "google.com; ; ; ; ; ; ; ", "email": "google.com; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v70/reed17a.html", "aff_unique_index": "0;0;0;0;0;0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Parallel and Distributed Thompson Sampling for Large-scale Accelerated Exploration of Chemical Space", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/767", "id": "767", "author_site": "Jose Miguel Hernandez-Lobato, James Requeima, Edward Pyzer-Knapp, Alan Aspuru-Guzik", "author": "Jos\u00e9 Miguel Hern\u00e1ndez-Lobato; James Requeima; Edward O. Pyzer-Knapp; Al\u00e1n Aspuru-Guzik", "abstract": "Chemical space is so large that brute force searches for new interesting molecules are infeasible. High-throughput virtual screening via computer cluster simulations can speed up the discovery process by collecting very large amounts of data in parallel, e.g., up to hundreds or thousands of parallel measurements. Bayesian optimization (BO) can produce additional acceleration by sequentially identifying the most useful simulations or experiments to be performed next. However, current BO methods cannot scale to the large numbers of parallel measurements and the massive libraries of molecules currently used in high-throughput screening. Here, we propose a scalable solution based on a parallel and distributed implementation of Thompson sampling (PDTS). We show that, in small scale problems, PDTS performs similarly as parallel expected improvement (EI), a batch version of the most widely used BO heuristic. Additionally, in settings where parallel EI does not scale, PDTS outperforms other scalable baselines such as a greedy search, $\\epsilon$-greedy approaches and a random search method. These results show that PDTS is a successful solution for large-scale parallel BO.", "bibtex": "@InProceedings{pmlr-v70-hernandez-lobato17a,\n title = \t {Parallel and Distributed Thompson Sampling for Large-scale Accelerated Exploration of Chemical Space},\n author = {Jos{\\'e} Miguel Hern{\\'a}ndez-Lobato and James Requeima and Edward O. 
Pyzer-Knapp and Al{\\'a}n Aspuru-Guzik},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1470--1479},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/hernandez-lobato17a/hernandez-lobato17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/hernandez-lobato17a.html},\n abstract = \t {Chemical space is so large that brute force searches for new interesting molecules are infeasible. High-throughput virtual screening via computer cluster simulations can speed up the discovery process by collecting very large amounts of data in parallel, e.g., up to hundreds or thousands of parallel measurements. Bayesian optimization (BO) can produce additional acceleration by sequentially identifying the most useful simulations or experiments to be performed next. However, current BO methods cannot scale to the large numbers of parallel measurements and the massive libraries of molecules currently used in high-throughput screening. Here, we propose a scalable solution based on a parallel and distributed implementation of Thompson sampling (PDTS). We show that, in small scale problems, PDTS performs similarly as parallel expected improvement (EI), a batch version of the most widely used BO heuristic. Additionally, in settings where parallel EI does not scale, PDTS outperforms other scalable baselines such as a greedy search, $\\epsilon$-greedy approaches and a random search method. These results show that PDTS is a successful solution for large-scale parallel BO.}\n}", "pdf": "http://proceedings.mlr.press/v70/hernandez-lobato17a/hernandez-lobato17a.pdf", "supp": "", "pdf_size": 1103712, "gs_citation": 239, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9374959473294906703&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "University of Cambridge; University of Cambridge + Invenia Labs; Harvard University; Harvard University + IBM Research", "aff_domain": "cam.ac.uk; ;uk.ibm.com; ", "email": "cam.ac.uk; ;uk.ibm.com; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/hernandez-lobato17a.html", "aff_unique_index": "0;0+1;2;2+3", "aff_unique_norm": "University of Cambridge;Invenia Labs;Harvard University;IBM", "aff_unique_dep": ";;;IBM Research", "aff_unique_url": "https://www.cam.ac.uk;https://www.invenia.ca;https://www.harvard.edu;https://www.ibm.com/research", "aff_unique_abbr": "Cambridge;;Harvard;IBM", "aff_campus_unique_index": "0;0;", "aff_campus_unique": "Cambridge;", "aff_country_unique_index": "0;0+1;2;2+2", "aff_country_unique": "United Kingdom;Canada;United States" }, { "title": "Parseval Networks: Improving Robustness to Adversarial Examples", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/830", "id": "830", "author_site": "Moustapha Cisse, Piotr Bojanowski, Edouard Grave, Yann Dauphin, Nicolas Usunier", "author": "Moustapha Cisse; Piotr Bojanowski; Edouard Grave; Yann Dauphin; Nicolas Usunier", "abstract": "We introduce Parseval networks, a form of deep neural networks in which the Lipschitz constant of linear, convolutional and aggregation layers is constrained to be smaller than $1$. 
Parseval networks are empirically and theoretically motivated by an analysis of the robustness of the predictions made by deep neural networks when their input is subject to an adversarial perturbation. The most important feature of Parseval networks is to maintain weight matrices of linear and convolutional layers to be (approximately) Parseval tight frames, which are extensions of orthogonal matrices to non-square matrices. We describe how these constraints can be maintained efficiently during SGD. We show that Parseval networks match the state-of-the-art regarding accuracy on CIFAR-10/100 and Street View House Numbers (SVHN), while being more robust than their vanilla counterpart against adversarial examples. Incidentally, Parseval networks also tend to train faster and make a better usage of the full capacity of the networks.", "bibtex": "@InProceedings{pmlr-v70-cisse17a,\n title = \t {Parseval Networks: Improving Robustness to Adversarial Examples},\n author = {Moustapha Cisse and Piotr Bojanowski and Edouard Grave and Yann Dauphin and Nicolas Usunier},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {854--863},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/cisse17a/cisse17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/cisse17a.html},\n abstract = \t {We introduce Parseval networks, a form of deep neural networks in which the Lipschitz constant of linear, convolutional and aggregation layers is constrained to be smaller than $1$. Parseval networks are empirically and theoretically motivated by an analysis of the robustness of the predictions made by deep neural networks when their input is subject to an adversarial perturbation. The most important feature of Parseval networks is to maintain weight matrices of linear and convolutional layers to be (approximately) Parseval tight frames, which are extensions of orthogonal matrices to non-square matrices. We describe how these constraints can be maintained efficiently during SGD. We show that Parseval networks match the state-of-the-art regarding accuracy on CIFAR-10/100 and Street View House Numbers (SVHN), while being more robust than their vanilla counterpart against adversarial examples. 
Incidentally, Parseval networks also tend to train faster and make a better usage of the full capacity of the networks.}\n}", "pdf": "http://proceedings.mlr.press/v70/cisse17a/cisse17a.pdf", "supp": "", "pdf_size": 461279, "gs_citation": 958, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11979400639430622244&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Facebook AI Research; Facebook AI Research; Facebook AI Research; Facebook AI Research; Facebook AI Research", "aff_domain": "fb.com; ; ; ; ", "email": "fb.com; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/cisse17a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Meta", "aff_unique_dep": "Facebook AI Research", "aff_unique_url": "https://research.facebook.com", "aff_unique_abbr": "FAIR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Partitioned Tensor Factorizations for Learning Mixed Membership Models", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/551", "id": "551", "author_site": "Zilong Tan, Sayan Mukherjee", "author": "Zilong Tan; Sayan Mukherjee", "abstract": "We present an efficient algorithm for learning mixed membership models when the number of variables p is much larger than the number of hidden components k. This algorithm reduces the computational complexity of state-of-the-art tensor methods, which require decomposing an $O(p^3)$ tensor, to factorizing $O(p/k)$ sub-tensors each of size $O(k^3)$. In addition, we address the issue of negative entries in the empirical method of moments based estimators. We provide sufficient conditions under which our approach has provable guarantees. Our approach obtains competitive empirical results on both simulated and real data.", "bibtex": "@InProceedings{pmlr-v70-tan17a,\n title = \t {Partitioned Tensor Factorizations for Learning Mixed Membership Models},\n author = {Zilong Tan and Sayan Mukherjee},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3358--3367},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/tan17a/tan17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/tan17a.html},\n abstract = \t {We present an efficient algorithm for learning mixed membership models when the number of variables p is much larger than the number of hidden components k. This algorithm reduces the computational complexity of state-of-the-art tensor methods, which require decomposing an $O(p^3)$ tensor, to factorizing $O(p/k)$ sub-tensors each of size $O(k^3)$. In addition, we address the issue of negative entries in the empirical method of moments based estimators. We provide sufficient conditions under which our approach has provable guarantees. 
Our approach obtains competitive empirical results on both simulated and real data.}\n}", "pdf": "http://proceedings.mlr.press/v70/tan17a/tan17a.pdf", "supp": "", "pdf_size": 314442, "gs_citation": 2, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14005786723298291330&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Duke University; Duke University", "aff_domain": "duke.edu;stat.duke.edu", "email": "duke.edu;stat.duke.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/tan17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Duke University", "aff_unique_dep": "", "aff_unique_url": "https://www.duke.edu", "aff_unique_abbr": "Duke", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "PixelCNN Models with Auxiliary Variables for Natural Image Modeling", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/606", "id": "606", "author_site": "Alexander Kolesnikov, Christoph H. Lampert", "author": "Alexander Kolesnikov; Christoph H. Lampert", "abstract": "We study probabilistic models of natural images and extend the autoregressive family of PixelCNN models by incorporating auxiliary variables. Subsequently, we describe two new generative image models that exploit different image transformations as auxiliary variables: a quantized grayscale view of the image or a multi-resolution image pyramid. The proposed models tackle two known shortcomings of existing PixelCNN models: 1) their tendency to focus on low-level image details, while largely ignoring high-level image information, such as object shapes, and 2) their computationally costly procedure for image sampling. We experimentally demonstrate benefits of our models, in particular showing that they produce much more realistically looking image samples than previous state-of-the-art probabilistic models.", "bibtex": "@InProceedings{pmlr-v70-kolesnikov17a,\n title = \t {{P}ixel{CNN} Models with Auxiliary Variables for Natural Image Modeling},\n author = {Alexander Kolesnikov and Christoph H. Lampert},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1905--1914},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/kolesnikov17a/kolesnikov17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/kolesnikov17a.html},\n abstract = \t {We study probabilistic models of natural images and extend the autoregressive family of PixelCNN models by incorporating auxiliary variables. Subsequently, we describe two new generative image models that exploit different image transformations as auxiliary variables: a quantized grayscale view of the image or a multi-resolution image pyramid. The proposed models tackle two known shortcomings of existing PixelCNN models: 1) their tendency to focus on low-level image details, while largely ignoring high-level image information, such as object shapes, and 2) their computationally costly procedure for image sampling. 
We experimentally demonstrate benefits of our models, in particular showing that they produce much more realistically looking image samples than previous state-of-the-art probabilistic models.}\n}", "pdf": "http://proceedings.mlr.press/v70/kolesnikov17a/kolesnikov17a.pdf", "supp": "", "pdf_size": 1267999, "gs_citation": 54, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14220055903053333819&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "IST Austria, Klosterneuburg, Austria; IST Austria, Klosterneuburg, Austria", "aff_domain": "ist.ac.at; ", "email": "ist.ac.at; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/kolesnikov17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Institute of Science and Technology Austria", "aff_unique_dep": "", "aff_unique_url": "https://www.ist.ac.at", "aff_unique_abbr": "IST Austria", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Klosterneuburg", "aff_country_unique_index": "0;0", "aff_country_unique": "Austria" }, { "title": "Post-Inference Prior Swapping", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/532", "id": "532", "author_site": "Willie Neiswanger, Eric Xing", "author": "Willie Neiswanger; Eric Xing", "abstract": "While Bayesian methods are praised for their ability to incorporate useful prior knowledge, in practice, convenient priors that allow for computationally cheap or tractable inference are commonly used. In this paper, we investigate the following question: for a given model, is it possible to compute an inference result with any convenient false prior, and afterwards, given any target prior of interest, quickly transform this result into the target posterior? A potential solution is to use importance sampling (IS). However, we demonstrate that IS will fail for many choices of the target prior, depending on its parametric form and similarity to the false prior. Instead, we propose prior swapping, a method that leverages the pre-inferred false posterior to efficiently generate accurate posterior samples under arbitrary target priors. Prior swapping lets us apply less-costly inference algorithms to certain models, and incorporate new or updated prior information \u201cpost-inference\u201d. We give theoretical guarantees about our method, and demonstrate it empirically on a number of models and priors.", "bibtex": "@InProceedings{pmlr-v70-neiswanger17a,\n title = \t {Post-Inference Prior Swapping},\n author = {Willie Neiswanger and Eric Xing},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2594--2602},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/neiswanger17a/neiswanger17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/neiswanger17a.html},\n abstract = \t {While Bayesian methods are praised for their ability to incorporate useful prior knowledge, in practice, convenient priors that allow for computationally cheap or tractable inference are commonly used. In this paper, we investigate the following question: for a given model, is it possible to compute an inference result with any convenient false prior, and afterwards, given any target prior of interest, quickly transform this result into the target posterior? A potential solution is to use importance sampling (IS). 
However, we demonstrate that IS will fail for many choices of the target prior, depending on its parametric form and similarity to the false prior. Instead, we propose prior swapping, a method that leverages the pre-inferred false posterior to efficiently generate accurate posterior samples under arbitrary target priors. Prior swapping lets us apply less-costly inference algorithms to certain models, and incorporate new or updated prior information \u201cpost-inference\u201d. We give theoretical guarantees about our method, and demonstrate it empirically on a number of models and priors.}\n}", "pdf": "http://proceedings.mlr.press/v70/neiswanger17a/neiswanger17a.pdf", "supp": "", "pdf_size": 1215190, "gs_citation": 11, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2252446608507872089&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Carnegie Mellon University, Machine Learning Department, Pittsburgh, USA; CMU School of Computer Science", "aff_domain": "cs.cmu.edu; ", "email": "cs.cmu.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/neiswanger17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "Machine Learning Department", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Practical Gauss-Newton Optimisation for Deep Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/789", "id": "789", "author_site": "Aleksandar Botev, Hippolyt Ritter, David Barber", "author": "Aleksandar Botev; Hippolyt Ritter; David Barber", "abstract": "We present an efficient block-diagonal approximation to the Gauss-Newton matrix for feedforward neural networks. Our resulting algorithm is competitive against state-of-the-art first-order optimisation methods, with sometimes significant improvement in optimisation performance. Unlike first-order methods, for which hyperparameter tuning of the optimisation parameters is often a laborious process, our approach can provide good performance even when used with default settings. A side result of our work is that for piecewise linear transfer functions, the network objective function can have no differentiable local maxima, which may partially explain why such transfer functions facilitate effective optimisation.", "bibtex": "@InProceedings{pmlr-v70-botev17a,\n title = \t {Practical {G}auss-{N}ewton Optimisation for Deep Learning},\n author = {Aleksandar Botev and Hippolyt Ritter and David Barber},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {557--565},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/botev17a/botev17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/botev17a.html},\n abstract = \t {We present an efficient block-diagonal approximation to the Gauss-Newton matrix for feedforward neural networks. Our resulting algorithm is competitive against state-of-the-art first-order optimisation methods, with sometimes significant improvement in optimisation performance. 
Unlike first-order methods, for which hyperparameter tuning of the optimisation parameters is often a laborious process, our approach can provide good performance even when used with default settings. A side result of our work is that for piecewise linear transfer functions, the network objective function can have no differentiable local maxima, which may partially explain why such transfer functions facilitate effective optimisation.}\n}", "pdf": "http://proceedings.mlr.press/v70/botev17a/botev17a.pdf", "supp": "", "pdf_size": 968988, "gs_citation": 295, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11308249932137757328&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "University College London; University College London; University College London + Alan Turing Institute", "aff_domain": "cs.ucl.ac.uk; ; ", "email": "cs.ucl.ac.uk; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/botev17a.html", "aff_unique_index": "0;0;0+1", "aff_unique_norm": "University College London;Alan Turing Institute", "aff_unique_dep": ";", "aff_unique_url": "https://www.ucl.ac.uk;https://www.turing.ac.uk", "aff_unique_abbr": "UCL;ATI", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0+0", "aff_country_unique": "United Kingdom" }, { "title": "Prediction and Control with Temporal Segment Models", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/657", "id": "657", "author_site": "Nikhil Mishra, Pieter Abbeel, Igor Mordatch", "author": "Nikhil Mishra; Pieter Abbeel; Igor Mordatch", "abstract": "We introduce a method for learning the dynamics of complex nonlinear systems based on deep generative models over temporal segments of states and actions. Unlike dynamics models that operate over individual discrete timesteps, we learn the distribution over future state trajectories conditioned on past state, past action, and planned future action trajectories, as well as a latent prior over action trajectories. Our approach is based on convolutional autoregressive models and variational autoencoders. It makes stable and accurate predictions over long horizons for complex, stochastic systems, effectively expressing uncertainty and modeling the effects of collisions, sensory noise, and action delays. The learned dynamics model and action prior can be used for end-to-end, fully differentiable trajectory optimization and model-based policy optimization, which we use to evaluate the performance and sample-efficiency of our method.", "bibtex": "@InProceedings{pmlr-v70-mishra17a,\n title = \t {Prediction and Control with Temporal Segment Models},\n author = {Nikhil Mishra and Pieter Abbeel and Igor Mordatch},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2459--2468},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/mishra17a/mishra17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/mishra17a.html},\n abstract = \t {We introduce a method for learning the dynamics of complex nonlinear systems based on deep generative models over temporal segments of states and actions. 
Unlike dynamics models that operate over individual discrete timesteps, we learn the distribution over future state trajectories conditioned on past state, past action, and planned future action trajectories, as well as a latent prior over action trajectories. Our approach is based on convolutional autoregressive models and variational autoencoders. It makes stable and accurate predictions over long horizons for complex, stochastic systems, effectively expressing uncertainty and modeling the effects of collisions, sensory noise, and action delays. The learned dynamics model and action prior can be used for end-to-end, fully differentiable trajectory optimization and model-based policy optimization, which we use to evaluate the performance and sample-efficiency of our method.}\n}", "pdf": "http://proceedings.mlr.press/v70/mishra17a/mishra17a.pdf", "supp": "", "pdf_size": 870430, "gs_citation": 78, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16312310769728514187&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "University of California, Berkeley; University of California, Berkeley + OpenAI; OpenAI", "aff_domain": "berkeley.edu; ; ", "email": "berkeley.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/mishra17a.html", "aff_unique_index": "0;0+1;1", "aff_unique_norm": "University of California, Berkeley;OpenAI", "aff_unique_dep": ";", "aff_unique_url": "https://www.berkeley.edu;https://openai.com", "aff_unique_abbr": "UC Berkeley;OpenAI", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley;", "aff_country_unique_index": "0;0+0;0", "aff_country_unique": "United States" }, { "title": "Prediction under Uncertainty in Sparse Spectrum Gaussian Processes with Applications to Filtering and Control", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/786", "id": "786", "author_site": "Yunpeng Pan, Xinyan Yan, Evangelos Theodorou, Byron Boots", "author": "Yunpeng Pan; Xinyan Yan; Evangelos A. Theodorou; Byron Boots", "abstract": "Sparse Spectrum Gaussian Processes (SSGPs) are a powerful tool for scaling Gaussian processes (GPs) to large datasets. Existing SSGP algorithms for regression assume deterministic inputs, precluding their use in many real-world robotics and engineering applications where accounting for input uncertainty is crucial. We address this problem by proposing two analytic moment-based approaches with closed-form expressions for SSGP regression with uncertain inputs. Our methods are more general and scalable than their standard GP counterparts, and are naturally applicable to multi-step prediction or uncertainty propagation. We show that efficient algorithms for Bayesian filtering and stochastic model predictive control can use these methods, and we evaluate our algorithms with comparative analyses and both real-world and simulated experiments.", "bibtex": "@InProceedings{pmlr-v70-pan17a,\n title = \t {Prediction under Uncertainty in Sparse Spectrum {G}aussian Processes with Applications to Filtering and Control},\n author = {Yunpeng Pan and Xinyan Yan and Evangelos A. 
Theodorou and Byron Boots},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2760--2768},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/pan17a/pan17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/pan17a.html},\n abstract = \t {Sparse Spectrum Gaussian Processes (SSGPs) are a powerful tool for scaling Gaussian processes (GPs) to large datasets. Existing SSGP algorithms for regression assume deterministic inputs, precluding their use in many real-world robotics and engineering applications where accounting for input uncertainty is crucial. We address this problem by proposing two analytic moment-based approaches with closed-form expressions for SSGP regression with uncertain inputs. Our methods are more general and scalable than their standard GP counterparts, and are naturally applicable to multi-step prediction or uncertainty propagation. We show that efficient algorithms for Bayesian filtering and stochastic model predictive control can use these methods, and we evaluate our algorithms with comparative analyses and both real-world and simulated experiments.}\n}", "pdf": "http://proceedings.mlr.press/v70/pan17a/pan17a.pdf", "supp": "", "pdf_size": 2821985, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3647505711451547935&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Georgia Institute of Technology, Atlanta, Georgia, USA+School of Aerospace Engineering+School of Interactive Computing; Georgia Institute of Technology, Atlanta, Georgia, USA+School of Aerospace Engineering+School of Interactive Computing; Georgia Institute of Technology, Atlanta, Georgia, USA+School of Aerospace Engineering+School of Interactive Computing; Georgia Institute of Technology, Atlanta, Georgia, USA+School of Aerospace Engineering+School of Interactive Computing", "aff_domain": "gatech.edu; ; ; ", "email": "gatech.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/pan17a.html", "aff_unique_index": "0+1+0;0+1+0;0+1+0;0+1+0", "aff_unique_norm": "Georgia Institute of Technology;School of Aerospace Engineering", "aff_unique_dep": ";Aerospace Engineering", "aff_unique_url": "https://www.gatech.edu;", "aff_unique_abbr": "Georgia Tech;", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Atlanta;", "aff_country_unique_index": "0+0;0+0;0+0;0+0", "aff_country_unique": "United States;" }, { "title": "Preferential Bayesian Optimization", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/565", "id": "565", "author_site": "Javier Gonz\u00e1lez, Zhenwen Dai, Andreas Damianou, Neil Lawrence", "author": "Javier Gonz\u00e1lez; Zhenwen Dai; Andreas Damianou; Neil D. Lawrence", "abstract": "Bayesian optimization (BO) has emerged during the last few years as an effective approach to optimize black-box functions where direct queries of the objective are expensive. We consider the case where direct access to the function is not possible, but information about user preferences is. Such scenarios arise in problems where human preferences are modeled, such as A/B tests or recommender systems. 
We present a new framework for this scenario that we call Preferential Bayesian Optimization (PBO) and that allows us to find the optimum of a latent function that can only be queried through pairwise comparisons, so-called duels. PBO extends the applicability of standard BO ideas and generalizes previous discrete dueling approaches by modeling the probability of the winner of each duel by means of a Gaussian process model with a Bernoulli likelihood. The latent preference function is used to define a family of acquisition functions that extend usual policies used in BO. We illustrate the benefits of PBO in a variety of experiments in which we show how the way correlations are modeled is the key ingredient to drastically reduce the number of comparisons to find the optimum of the latent function of interest.", "bibtex": "@InProceedings{pmlr-v70-gonzalez17a,\n title = \t {Preferential {B}ayesian Optimization},\n author = {Javier Gonz{\\'a}lez and Zhenwen Dai and Andreas Damianou and Neil D. Lawrence},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1282--1291},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/gonzalez17a/gonzalez17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/gonzalez17a.html},\n abstract = \t {Bayesian optimization (BO) has emerged during the last few years as an effective approach to optimize black-box functions where direct queries of the objective are expensive. We consider the case where direct access to the function is not possible, but information about user preferences is. Such scenarios arise in problems where human preferences are modeled, such as A/B tests or recommender systems. We present a new framework for this scenario that we call Preferential Bayesian Optimization (PBO) and that allows us to find the optimum of a latent function that can only be queried through pairwise comparisons, so-called duels. PBO extends the applicability of standard BO ideas and generalizes previous discrete dueling approaches by modeling the probability of the winner of each duel by means of a Gaussian process model with a Bernoulli likelihood. The latent preference function is used to define a family of acquisition functions that extend usual policies used in BO. 
We illustrate the benefits of PBO in a variety of experiments in which we show how the way correlations are modeled is the key ingredient to drastically reduce the number of comparisons to find the optimum of the latent function of interest.}\n}", "pdf": "http://proceedings.mlr.press/v70/gonzalez17a/gonzalez17a.pdf", "supp": "", "pdf_size": 1529517, "gs_citation": 141, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5438612034483412706&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Amazon Research Cambridge, UK+University of Shef\ufb01eld, UK; Amazon Research Cambridge, UK; Amazon Research Cambridge, UK; University of Shef\ufb01eld, UK", "aff_domain": "amazon.com; ; ; ", "email": "amazon.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/gonzalez17a.html", "aff_unique_index": "0+1;0;0;1", "aff_unique_norm": "Amazon;University of Sheffield", "aff_unique_dep": "Amazon Research;", "aff_unique_url": "https://www.amazon.science;https://www.sheffield.ac.uk", "aff_unique_abbr": "Amazon;Sheffield", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Cambridge;", "aff_country_unique_index": "0+0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Priv\u2019IT: Private and Sample Efficient Identity Testing", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/459", "id": "459", "author_site": "Bryan Cai, Constantinos Daskalakis, Gautam Kamath", "author": "Bryan Cai; Constantinos Daskalakis; Gautam Kamath", "abstract": "We develop differentially private hypothesis testing methods for the small sample regime. Given a sample $\\mathcal{D}$ from a categorical distribution $p$ over some domain $\\Sigma$, an explicitly described distribution $q$ over $\\Sigma$, some privacy parameter $\\epsilon$, accuracy parameter $\\alpha$, and requirements $\\beta_\\mathrm{I}$ and $\\beta_\\mathrm{II}$ for the type I and type II errors of our test, the goal is to distinguish between $p=q$ and $d_\\mathrm{tv}(p,q) \\ge \\alpha$. We provide theoretical bounds for the sample size $|\\mathcal{D}|$ so that our method both satisfies $(\\epsilon,0)$-differential privacy, and guarantees $\\beta_\\mathrm{I}$ and $\\beta_\\mathrm{II}$ type I and type II errors. We show that differential privacy may come for free in some regimes of parameters, and we always beat the sample complexity resulting from running the $\\chi^2$-test with noisy counts, or standard approaches such as repetition for endowing non-private $\\chi^2$-style statistics with differential privacy guarantees. We experimentally compare the sample complexity of our method to that of recently proposed methods for private hypothesis testing.", "bibtex": "@InProceedings{pmlr-v70-cai17a,\n title = \t {{P}riv'{IT}: Private and Sample Efficient Identity Testing},\n author = {Bryan Cai and Constantinos Daskalakis and Gautam Kamath},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {635--644},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/cai17a/cai17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/cai17a.html},\n abstract = \t {We develop differentially private hypothesis testing methods for the small sample regime. 
Given a sample $\\mathcal{D}$ from a categorical distribution $p$ over some domain $\\Sigma$, an explicitly described distribution $q$ over $\\Sigma$, some privacy parameter $\\epsilon$, accuracy parameter $\\alpha$, and requirements $\\beta_\\mathrm{I}$ and $\\beta_\\mathrm{II}$ for the type I and type II errors of our test, the goal is to distinguish between $p=q$ and $d_\\mathrm{tv}(p,q) \\ge \\alpha$. We provide theoretical bounds for the sample size $|\\mathcal{D}|$ so that our method both satisfies $(\\epsilon,0)$-differential privacy, and guarantees $\\beta_\\mathrm{I}$ and $\\beta_\\mathrm{II}$ type I and type II errors. We show that differential privacy may come for free in some regimes of parameters, and we always beat the sample complexity resulting from running the $\\chi^2$-test with noisy counts, or standard approaches such as repetition for endowing non-private $\\chi^2$-style statistics with differential privacy guarantees. We experimentally compare the sample complexity of our method to that of recently proposed methods for private hypothesis testing.}\n}", "pdf": "http://proceedings.mlr.press/v70/cai17a/cai17a.pdf", "supp": "", "pdf_size": 477524, "gs_citation": 65, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11460872932765116781&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Massachusetts Institute of Technology; Massachusetts Institute of Technology; Massachusetts Institute of Technology", "aff_domain": "mit.edu;csail.mit.edu;csail.mit.edu", "email": "mit.edu;csail.mit.edu;csail.mit.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/cai17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "", "aff_unique_url": "https://web.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Probabilistic Path Hamiltonian Monte Carlo", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/615", "id": "615", "author_site": "Vu Dinh, Arman Bilge, Cheng Zhang, Frederick Matsen", "author": "Vu Dinh; Arman Bilge; Cheng Zhang; Frederick A. Matsen IV", "abstract": "Hamiltonian Monte Carlo (HMC) is an efficient and effective means of sampling posterior distributions on Euclidean space, which has been extended to manifolds with boundary. However, some applications require an extension to more general spaces. For example, phylogenetic (evolutionary) trees are defined in terms of both a discrete graph and associated continuous parameters; although one can represent these aspects using a single connected space, this rather complex space is not suitable for existing HMC algorithms. In this paper, we develop Probabilistic Path HMC (PPHMC) as a first step to sampling distributions on spaces with intricate combinatorial structure. We define PPHMC on orthant complexes, show that the resulting Markov chain is ergodic, and provide a promising implementation for the case of phylogenetic trees in open-source software. 
We also show that a surrogate function to ease the transition across a boundary on which the log-posterior has discontinuous derivatives can greatly improve efficiency.", "bibtex": "@InProceedings{pmlr-v70-dinh17a,\n title = \t {Probabilistic Path {H}amiltonian {M}onte {C}arlo},\n author = {Vu Dinh and Arman Bilge and Cheng Zhang and Matsen, IV, Frederick A.},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1009--1018},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/dinh17a/dinh17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/dinh17a.html},\n abstract = \t {Hamiltonian Monte Carlo (HMC) is an efficient and effective means of sampling posterior distributions on Euclidean space, which has been extended to manifolds with boundary. However, some applications require an extension to more general spaces. For example, phylogenetic (evolutionary) trees are defined in terms of both a discrete graph and associated continuous parameters; although one can represent these aspects using a single connected space, this rather complex space is not suitable for existing HMC algorithms. In this paper, we develop Probabilistic Path HMC (PPHMC) as a first step to sampling distributions on spaces with intricate combinatorial structure. We define PPHMC on orthant complexes, show that the resulting Markov chain is ergodic, and provide a promising implementation for the case of phylogenetic trees in open-source software. We also show that a surrogate function to ease the transition across a boundary on which the log-posterior has discontinuous derivatives can greatly improve efficiency.}\n}", "pdf": "http://proceedings.mlr.press/v70/dinh17a/dinh17a.pdf", "supp": "", "pdf_size": 449481, "gs_citation": 63, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13323621738483369464&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Program in Computational Biology, Fred Hutchison Cancer Research Center, Seattle, WA, USA+Department of Statistics, University of Washington, Seattle, WA, USA; Program in Computational Biology, Fred Hutchison Cancer Research Center, Seattle, WA, USA+Department of Statistics, University of Washington, Seattle, WA, USA; Program in Computational Biology, Fred Hutchison Cancer Research Center, Seattle, WA, USA; Program in Computational Biology, Fred Hutchison Cancer Research Center, Seattle, WA, USA", "aff_domain": "fredhutch.org;fredhutch.org;fredhutch.org;fredhutch.org", "email": "fredhutch.org;fredhutch.org;fredhutch.org;fredhutch.org", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/dinh17a.html", "aff_unique_index": "0+1;0+1;0;0", "aff_unique_norm": "Fred Hutchinson Cancer Research Center;University of Washington", "aff_unique_dep": "Program in Computational Biology;Department of Statistics", "aff_unique_url": "https://www.fredhutch.org;https://www.washington.edu", "aff_unique_abbr": "Fred Hutch;UW", "aff_campus_unique_index": "0+0;0+0;0;0", "aff_campus_unique": "Seattle", "aff_country_unique_index": "0+0;0+0;0;0", "aff_country_unique": "United States" }, { "title": "Probabilistic Submodular Maximization in Sub-Linear Time", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/840", "id": "840", "author_site": "Serban A Stan, Morteza Zadimoghaddam, 
Andreas Krause, Amin Karbasi", "author": "Serban Stan; Morteza Zadimoghaddam; Andreas Krause; Amin Karbasi", "abstract": "In this paper, we consider optimizing submodular functions that are drawn from some unknown distribution. This setting arises, e.g., in recommender systems, where the utility of a subset of items may depend on a user-specific submodular utility function. In modern applications, the ground set of items is often so large that even the widely used (lazy) greedy algorithm is not efficient enough. As a remedy, we introduce the problem of sublinear time probabilistic submodular maximization: Given training examples of functions (e.g., via user feature vectors), we seek to reduce the ground set so that optimizing new functions drawn from the same distribution will provide almost as much value when restricted to the reduced ground set as when using the full set. We cast this problem as a two-stage submodular maximization and develop a novel efficient algorithm for this problem which offers $1/2(1 - 1/e^2)$ approximation ratio for general monotone submodular functions and general matroid constraints. We demonstrate the effectiveness of our approach on several real-world applications where running the maximization problem on the reduced ground set leads to two orders of magnitude speed-up while incurring almost no loss.", "bibtex": "@InProceedings{pmlr-v70-stan17a,\n title = \t {Probabilistic Submodular Maximization in Sub-Linear Time},\n author = {Serban Stan and Morteza Zadimoghaddam and Andreas Krause and Amin Karbasi},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3241--3250},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/stan17a/stan17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/stan17a.html},\n abstract = \t {In this paper, we consider optimizing submodular functions that are drawn from some unknown distribution. This setting arises, e.g., in recommender systems, where the utility of a subset of items may depend on a user-specific submodular utility function. In modern applications, the ground set of items is often so large that even the widely used (lazy) greedy algorithm is not efficient enough. As a remedy, we introduce the problem of sublinear time probabilistic submodular maximization: Given training examples of functions (e.g., via user feature vectors), we seek to reduce the ground set so that optimizing new functions drawn from the same distribution will provide almost as much value when restricted to the reduced ground set as when using the full set. We cast this problem as a two-stage submodular maximization and develop a novel efficient algorithm for this problem which offers $1/2(1 - 1/e^2)$ approximation ratio for general monotone submodular functions and general matroid constraints. 
We demonstrate the effectiveness of our approach on several real-world applications where running the maximization problem on the reduced ground set leads to two orders of magnitude speed-up while incurring almost no loss.}\n}", "pdf": "http://proceedings.mlr.press/v70/stan17a/stan17a.pdf", "supp": "", "pdf_size": 1374885, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12598297021347590324&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Yale University; Google Research; ETH Zurich; Yale University", "aff_domain": "yale.edu;google.com;ethz.ch;yale.edu", "email": "yale.edu;google.com;ethz.ch;yale.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/stan17a.html", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "Yale University;Google;ETH Zurich", "aff_unique_dep": ";Google Research;", "aff_unique_url": "https://www.yale.edu;https://research.google;https://www.ethz.ch", "aff_unique_abbr": "Yale;Google Research;ETHZ", "aff_campus_unique_index": "1", "aff_campus_unique": ";Mountain View", "aff_country_unique_index": "0;0;1;0", "aff_country_unique": "United States;Switzerland" }, { "title": "Programming with a Differentiable Forth Interpreter", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/809", "id": "809", "author_site": "Matko Bo\u0161njak, Tim Rockt\u00e4schel, Jason Naradowsky, Sebastian Riedel", "author": "Matko Bo\u0161njak; Tim Rockt\u00e4schel; Jason Naradowsky; Sebastian Riedel", "abstract": "Given that in practice training data is scarce for all but a small set of problems, a core question is how to incorporate prior knowledge into a model. In this paper, we consider the case of prior procedural knowledge for neural networks, such as knowing how a program should traverse a sequence, but not what local actions should be performed at each step. To this end, we present an end-to-end differentiable interpreter for the programming language Forth which enables programmers to write program sketches with slots that can be filled with behaviour trained from program input-output data. We can optimise this behaviour directly through gradient descent techniques on user-specified objectives, and also integrate the program into any larger neural computation graph. We show empirically that our interpreter is able to effectively leverage different levels of prior program structure and learn complex behaviours such as sequence sorting and addition. When connected to outputs of an LSTM and trained jointly, our interpreter achieves state-of-the-art accuracy for end-to-end reasoning about quantities expressed in natural language stories.", "bibtex": "@InProceedings{pmlr-v70-bosnjak17a,\n title = \t {Programming with a Differentiable Forth Interpreter},\n author = {Matko Bo{\\v{s}}njak and Tim Rockt{\\\"a}schel and Jason Naradowsky and Sebastian Riedel},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {547--556},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/bosnjak17a/bosnjak17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/bosnjak17a.html},\n abstract = \t {Given that in practice training data is scarce for all but a small set of problems, a core question is how to incorporate prior knowledge into a model. 
In this paper, we consider the case of prior procedural knowledge for neural networks, such as knowing how a program should traverse a sequence, but not what local actions should be performed at each step. To this end, we present an end-to-end differentiable interpreter for the programming language Forth which enables programmers to write program sketches with slots that can be filled with behaviour trained from program input-output data. We can optimise this behaviour directly through gradient descent techniques on user-specified objectives, and also integrate the program into any larger neural computation graph. We show empirically that our interpreter is able to effectively leverage different levels of prior program structure and learn complex behaviours such as sequence sorting and addition. When connected to outputs of an LSTM and trained jointly, our interpreter achieves state-of-the-art accuracy for end-to-end reasoning about quantities expressed in natural language stories.}\n}", "pdf": "http://proceedings.mlr.press/v70/bosnjak17a/bosnjak17a.pdf", "supp": "", "pdf_size": 510997, "gs_citation": 121, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=414036379914758548&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Department of Computer Science, University College London, London, UK; Department of Computer Science, University of Oxford, Oxford, UK; Department of Theoretical and Applied Linguistics, University of Cambridge, Cambridge, UK; Department of Computer Science, University College London, London, UK", "aff_domain": "cs.ucl.ac.uk; ; ; ", "email": "cs.ucl.ac.uk; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/bosnjak17a.html", "aff_unique_index": "0;1;2;0", "aff_unique_norm": "University College London;University of Oxford;University of Cambridge", "aff_unique_dep": "Department of Computer Science;Department of Computer Science;Department of Theoretical and Applied Linguistics", "aff_unique_url": "https://www.ucl.ac.uk;https://www.ox.ac.uk;https://www.cam.ac.uk", "aff_unique_abbr": "UCL;Oxford;Cambridge", "aff_campus_unique_index": "0;1;2;0", "aff_campus_unique": "London;Oxford;Cambridge", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Projection-free Distributed Online Learning in Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/700", "id": "700", "author_site": "Wenpeng Zhang, Peilin Zhao, Wenwu Zhu, Steven Hoi, Tong Zhang", "author": "Wenpeng Zhang; Peilin Zhao; Wenwu Zhu; Steven C. H. Hoi; Tong Zhang", "abstract": "The conditional gradient algorithm has regained a surge of research interest in recent years due to its high efficiency in handling large-scale machine learning problems. However, none of existing studies has explored it in the distributed online learning setting, where locally light computation is assumed. In this paper, we fill this gap by proposing the distributed online conditional gradient algorithm, which eschews the expensive projection operation needed in its counterpart algorithms by exploiting much simpler linear optimization steps. We give a regret bound for the proposed algorithm as a function of the network size and topology, which will be smaller on smaller graphs or \u201cwell-connected\u201d graphs. 
Experiments on two large-scale real-world datasets for a multiclass classification task confirm the computational benefit of the proposed algorithm and also verify the theoretical regret bound.", "bibtex": "@InProceedings{pmlr-v70-zhang17g,\n title = \t {Projection-free Distributed Online Learning in Networks},\n author = {Wenpeng Zhang and Peilin Zhao and Wenwu Zhu and Steven C. H. Hoi and Tong Zhang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {4054--4062},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zhang17g/zhang17g.pdf},\n url = \t {https://proceedings.mlr.press/v70/zhang17g.html},\n abstract = \t {The conditional gradient algorithm has regained a surge of research interest in recent years due to its high efficiency in handling large-scale machine learning problems. However, none of existing studies has explored it in the distributed online learning setting, where locally light computation is assumed. In this paper, we fill this gap by proposing the distributed online conditional gradient algorithm, which eschews the expensive projection operation needed in its counterpart algorithms by exploiting much simpler linear optimization steps. We give a regret bound for the proposed algorithm as a function of the network size and topology, which will be smaller on smaller graphs or \u201cwell-connected\u201d graphs. Experiments on two large-scale real-world datasets for a multiclass classification task confirm the computational benefit of the proposed algorithm and also verify the theoretical regret bound.}\n}", "pdf": "http://proceedings.mlr.press/v70/zhang17g/zhang17g.pdf", "supp": "", "pdf_size": 391279, "gs_citation": 91, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9315365894909212297&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Department of Computer Science and Technology, Tsinghua University, Beijing, China; Artificial Intelligence Department, Ant Financial Services Group, Hangzhou, China; Department of Computer Science and Technology, Tsinghua University, Beijing, China; School of Information Systems, Singapore Management University, Singapore; Tencent AI Lab, Shenzhen, China", "aff_domain": "gmail.com; ;tsinghua.edu.cn; ; ", "email": "gmail.com; ;tsinghua.edu.cn; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/zhang17g.html", "aff_unique_index": "0;1;0;2;3", "aff_unique_norm": "Tsinghua University;Ant Financial Services Group;Singapore Management University;Tencent", "aff_unique_dep": "Department of Computer Science and Technology;Artificial Intelligence Department;School of Information Systems;AI Lab", "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.antgroup.com;https://www.smu.edu.sg;https://ai.tencent.com", "aff_unique_abbr": "THU;Ant Financial;SMU;Tencent AI Lab", "aff_campus_unique_index": "0;1;0;3", "aff_campus_unique": "Beijing;Hangzhou;;Shenzhen", "aff_country_unique_index": "0;0;0;1;0", "aff_country_unique": "China;Singapore" }, { "title": "ProtoNN: Compressed and Accurate kNN for Resource-scarce Devices", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/683", "id": "683", "author_site": "Chirag Gupta, ARUN SUGGALA, Ankit Goyal, Saurabh Goyal, Ashish Kumar, Bhargavi Paranjape, Harsha Vardhan Simhadri, 
Raghavendra Udupa, Manik Varma, Prateek Jain", "author": "Chirag Gupta; Arun Sai Suggala; Ankit Goyal; Harsha Vardhan Simhadri; Bhargavi Paranjape; Ashish Kumar; Saurabh Goyal; Raghavendra Udupa; Manik Varma; Prateek Jain", "abstract": "Several real-world applications require real-time prediction on resource-scarce devices such as an Internet of Things (IoT) sensor. Such applications demand prediction models with small storage and computational complexity that do not compromise significantly on accuracy. In this work, we propose ProtoNN, a novel algorithm that addresses the problem of real-time and accurate prediction on resource-scarce devices. ProtoNN is inspired by k-Nearest Neighbor (KNN) but has several orders lower storage and prediction complexity. ProtoNN models can be deployed even on devices with puny storage and computational power (e.g. an Arduino UNO with 2kB RAM) to get excellent prediction accuracy. ProtoNN derives its strength from three key ideas: a) learning a small number of prototypes to represent the entire training set, b) sparse low dimensional projection of data, c) joint discriminative learning of the projection and prototypes with explicit model size constraint. We conduct systematic empirical evaluation of ProtoNN on a variety of supervised learning tasks (binary, multi-class, multi-label classification) and show that it gives nearly state-of-the-art prediction accuracy on resource-scarce devices while consuming several orders lower storage, and using minimal working memory.", "bibtex": "@InProceedings{pmlr-v70-gupta17a,\n title = \t {{P}roto{NN}: Compressed and Accurate k{NN} for Resource-scarce Devices},\n author = {Chirag Gupta and Arun Sai Suggala and Ankit Goyal and Harsha Vardhan Simhadri and Bhargavi Paranjape and Ashish Kumar and Saurabh Goyal and Raghavendra Udupa and Manik Varma and Prateek Jain},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1331--1340},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/gupta17a/gupta17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/gupta17a.html},\n abstract = \t {Several real-world applications require real-time prediction on resource-scarce devices such as an Internet of Things (IoT) sensor. Such applications demand prediction models with small storage and computational complexity that do not compromise significantly on accuracy. In this work, we propose ProtoNN, a novel algorithm that addresses the problem of real-time and accurate prediction on resource-scarce devices. ProtoNN is inspired by k-Nearest Neighbor (KNN) but has several orders lower storage and prediction complexity. ProtoNN models can be deployed even on devices with puny storage and computational power (e.g. an Arduino UNO with 2kB RAM) to get excellent prediction accuracy. ProtoNN derives its strength from three key ideas: a) learning a small number of prototypes to represent the entire training set, b) sparse low dimensional projection of data, c) joint discriminative learning of the projection and prototypes with explicit model size constraint. 
We conduct systematic empirical evaluation of ProtoNN on a variety of supervised learning tasks (binary, multi-class, multi-label classification) and show that it gives nearly state-of-the-art prediction accuracy on resource-scarce devices while consuming several orders lower storage, and using minimal working memory.}\n}", "pdf": "http://proceedings.mlr.press/v70/gupta17a/gupta17a.pdf", "supp": "", "pdf_size": 630797, "gs_citation": 227, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3616267009123779187&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 18, "aff": "Microsoft Research, India; Microsoft Research, India+ Carnegie Mellon University, Pittsburgh; Microsoft Research, India+ University of Michigan, Ann Arbor; Microsoft Research, India; Microsoft Research, India; Microsoft Research, India; IIT Delhi, India; Microsoft Research, India; Microsoft Research, India; Microsoft Research, India", "aff_domain": ";andrew.cmu.edu; ; ; ; ; ; ; ;microsoft.com", "email": ";andrew.cmu.edu; ; ; ; ; ; ; ;microsoft.com", "github": "", "project": "", "author_num": 10, "oa": "https://proceedings.mlr.press/v70/gupta17a.html", "aff_unique_index": "0;0+1;0+2;0;0;0;3;0;0;0", "aff_unique_norm": "Microsoft;Carnegie Mellon University;University of Michigan;Indian Institute of Technology Delhi", "aff_unique_dep": "Microsoft Research;;;", "aff_unique_url": "https://www.microsoft.com/en-us/research/group/india.aspx;https://www.cmu.edu;https://www.umich.edu;https://www.iitd.ac.in", "aff_unique_abbr": "MSR India;CMU;UM;IITD", "aff_campus_unique_index": "1;2;3", "aff_campus_unique": ";Pittsburgh;Ann Arbor;Delhi", "aff_country_unique_index": "0;0+1;0+1;0;0;0;0;0;0;0", "aff_country_unique": "India;United States" }, { "title": "Provable Alternating Gradient Descent for Non-negative Matrix Factorization with Strong Correlations", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/635", "id": "635", "author_site": "Yuanzhi Li, Yingyu Liang", "author": "Yuanzhi Li; Yingyu Liang", "abstract": "Non-negative matrix factorization is a basic tool for decomposing data into the feature and weight matrices under non-negativity constraints, and in practice is often solved in the alternating minimization framework. However, it is unclear whether such algorithms can recover the ground-truth feature matrix when the weights for different features are highly correlated, which is common in applications. This paper proposes a simple and natural alternating gradient descent based algorithm, and shows that with a mild initialization it provably recovers the ground-truth in the presence of strong correlations. In most interesting cases, the correlation can be in the same order as the highest possible. Our analysis also reveals its several favorable features including robustness to noise. 
We complement our theoretical results with empirical studies on semi-synthetic datasets, demonstrating its advantage over several popular methods in recovering the ground-truth.}\n}", "pdf": "http://proceedings.mlr.press/v70/li17b/li17b.pdf", "supp": "", "pdf_size": 444005, "gs_citation": 24, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11670249304983520289&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Princeton University; Princeton University", "aff_domain": "cs.princeton.edu;cs.princeton.edu", "email": "cs.princeton.edu;cs.princeton.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/li17b.html", "aff_unique_index": "0;0", "aff_unique_norm": "Princeton University", "aff_unique_dep": "", "aff_unique_url": "https://www.princeton.edu", "aff_unique_abbr": "Princeton", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Provably Optimal Algorithms for Generalized Linear Contextual Bandits", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/654", "id": "654", "author_site": "Lihong Li, Yu Lu, Dengyong Zhou", "author": "Lihong Li; Yu Lu; Dengyong Zhou", "abstract": "Contextual bandits are widely used in Internet services from news recommendation to advertising, and to Web search. Generalized linear models (logistic regression in particular) have demonstrated stronger performance than linear models in many applications where rewards are binary. However, most theoretical analyses on contextual bandits so far are on linear bandits. In this work, we propose an upper confidence bound based algorithm for generalized linear contextual bandits, which achieves an $\\tilde{O}(\\sqrt{dT})$ regret over T rounds with d dimensional feature vectors. 
This regret matches the minimax lower bound, up to logarithmic terms, and improves on the best previous result by a $\\sqrt{d}$ factor, assuming the number of arms is fixed. A key component in our analysis is to establish a new, sharp finite-sample confidence bound for maximum likelihood estimates in generalized linear models, which may be of independent interest. We also analyze a simpler upper confidence bound algorithm, which is useful in practice, and prove it to have optimal regret for certain cases.", "bibtex": "@InProceedings{pmlr-v70-li17c,\n title = \t {Provably Optimal Algorithms for Generalized Linear Contextual Bandits},\n author = {Lihong Li and Yu Lu and Dengyong Zhou},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2071--2080},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/li17c/li17c.pdf},\n url = \t {https://proceedings.mlr.press/v70/li17c.html},\n abstract = \t {Contextual bandits are widely used in Internet services from news recommendation to advertising, and to Web search. Generalized linear models (logistic regression in particular) have demonstrated stronger performance than linear models in many applications where rewards are binary. However, most theoretical analyses on contextual bandits so far are on linear bandits. In this work, we propose an upper confidence bound based algorithm for generalized linear contextual bandits, which achieves an $\\tilde{O}(\\sqrt{dT})$ regret over T rounds with d dimensional feature vectors. This regret matches the minimax lower bound, up to logarithmic terms, and improves on the best previous result by a $\\sqrt{d}$ factor, assuming the number of arms is fixed. A key component in our analysis is to establish a new, sharp finite-sample confidence bound for maximum likelihood estimates in generalized linear models, which may be of independent interest. 
We also analyze a simpler upper confidence bound algorithm, which is useful in practice, and prove it to have optimal regret for certain cases.}\n}", "pdf": "http://proceedings.mlr.press/v70/li17c/li17c.pdf", "supp": "", "pdf_size": 319700, "gs_citation": 402, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5979099763262787664&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Microsoft Research, Redmond, WA 98052; Department of Statistics, Yale University, New Haven, CT, USA; Microsoft Research, Redmond, WA 98052", "aff_domain": "microsoft.com;yale.edu;microsoft.com", "email": "microsoft.com;yale.edu;microsoft.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/li17c.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Microsoft;Yale University", "aff_unique_dep": "Microsoft Research;Department of Statistics", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://www.yale.edu", "aff_unique_abbr": "MSR;Yale", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Redmond;New Haven", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Prox-PDA: The Proximal Primal-Dual Algorithm for Fast Distributed Nonconvex Optimization and Learning Over Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/749", "id": "749", "author_site": "Mingyi Hong, Davood Hajinezhad, Ming-Min Zhao", "author": "Mingyi Hong; Davood Hajinezhad; Ming-Min Zhao", "abstract": "In this paper we consider nonconvex optimization and learning over a network of distributed nodes. We develop a Proximal Primal-Dual Algorithm (Prox-PDA), which enables the network nodes to distributedly and collectively compute the set of first-order stationary solutions in a global sublinear manner [with a rate of $O(1/r)$, where $r$ is the iteration counter]. To the best of our knowledge, this is the first algorithm that enables distributed nonconvex optimization with global rate guarantees. Our numerical experiments also demonstrate the effectiveness of the proposed algorithm.", "bibtex": "@InProceedings{pmlr-v70-hong17a,\n title = \t {Prox-{PDA}: The Proximal Primal-Dual Algorithm for Fast Distributed Nonconvex Optimization and Learning Over Networks},\n author = {Mingyi Hong and Davood Hajinezhad and Ming-Min Zhao},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1529--1538},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/hong17a/hong17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/hong17a.html},\n abstract = \t {In this paper we consider nonconvex optimization and learning over a network of distributed nodes. We develop a Proximal Primal-Dual Algorithm (Prox-PDA), which enables the network nodes to distributedly and collectively compute the set of first-order stationary solutions in a global sublinear manner [with a rate of $O(1/r)$, where $r$ is the iteration counter]. To the best of our knowledge, this is the first algorithm that enables distributed nonconvex optimization with global rate guarantees. 
Our numerical experiments also demonstrate the effectiveness of the proposed algorithm.}\n}", "pdf": "http://proceedings.mlr.press/v70/hong17a/hong17a.pdf", "supp": "", "pdf_size": 770395, "gs_citation": 178, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15524674027711784601&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Department of Industrial and Manufacturing Systems Engineering, Iowa State University, Ames, IA, USA; Department of Industrial and Manufacturing Systems Engineering, Iowa State University, Ames, IA, USA; College of Information Science and Electronic Engineering, Zhejiang University, China", "aff_domain": "iastate.edu;iastate.edu;zju.edu.cn", "email": "iastate.edu;iastate.edu;zju.edu.cn", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/hong17a.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "Iowa State University;Zhejiang University", "aff_unique_dep": "Department of Industrial and Manufacturing Systems Engineering;College of Information Science and Electronic Engineering", "aff_unique_url": "https://www.iastate.edu;http://www.zju.edu.cn", "aff_unique_abbr": "ISU;ZJU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Ames;", "aff_country_unique_index": "0;0;1", "aff_country_unique": "United States;China" }, { "title": "Random Feature Expansions for Deep Gaussian Processes", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/576", "id": "576", "author_site": "Kurt Cutajar, Edwin Bonilla, Pietro Michiardi, Maurizio Filippone", "author": "Kurt Cutajar; Edwin V. Bonilla; Pietro Michiardi; Maurizio Filippone", "abstract": "The composition of multiple Gaussian Processes as a Deep Gaussian Process DGP enables a deep probabilistic nonparametric approach to flexibly tackle complex machine learning problems with sound quantification of uncertainty. Existing inference approaches for DGP models have limited scalability and are notoriously cumbersome to construct. In this work we introduce a novel formulation of DGPs based on random feature expansions that we train using stochastic variational inference. This yields a practical learning framework which significantly advances the state-of-the-art in inference for DGPs, and enables accurate quantification of uncertainty. We extensively showcase the scalability and performance of our proposal on several datasets with up to 8 million observations, and various DGP architectures with up to 30 hidden layers.", "bibtex": "@InProceedings{pmlr-v70-cutajar17a,\n title = \t {Random Feature Expansions for Deep {G}aussian Processes},\n author = {Kurt Cutajar and Edwin V. Bonilla and Pietro Michiardi and Maurizio Filippone},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {884--893},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/cutajar17a/cutajar17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/cutajar17a.html},\n abstract = \t {The composition of multiple Gaussian Processes as a Deep Gaussian Process DGP enables a deep probabilistic nonparametric approach to flexibly tackle complex machine learning problems with sound quantification of uncertainty. Existing inference approaches for DGP models have limited scalability and are notoriously cumbersome to construct. 
In this work we introduce a novel formulation of DGPs based on random feature expansions that we train using stochastic variational inference. This yields a practical learning framework which significantly advances the state-of-the-art in inference for DGPs, and enables accurate quantification of uncertainty. We extensively showcase the scalability and performance of our proposal on several datasets with up to 8 million observations, and various DGP architectures with up to 30 hidden layers.}\n}", "pdf": "http://proceedings.mlr.press/v70/cutajar17a/cutajar17a.pdf", "supp": "", "pdf_size": 171740, "gs_citation": 180, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=442047037341375172&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Department of Data Science, EURECOM, France; School of Computer Science and Engineering, University of New South Wales, Australia; Department of Data Science, EURECOM, France; Department of Data Science, EURECOM, France", "aff_domain": "eurecom.fr;unsw.edu.au;eurecom.fr;eurecom.fr", "email": "eurecom.fr;unsw.edu.au;eurecom.fr;eurecom.fr", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/cutajar17a.html", "aff_unique_index": "0;1;0;0", "aff_unique_norm": "EURECOM;University of New South Wales", "aff_unique_dep": "Department of Data Science;School of Computer Science and Engineering", "aff_unique_url": "https://www.eurecom.fr;https://www.unsw.edu.au", "aff_unique_abbr": ";UNSW", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0;0", "aff_country_unique": "France;Australia" }, { "title": "Random Fourier Features for Kernel Ridge Regression: Approximation Bounds and Statistical Guarantees", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/800", "id": "800", "author_site": "Haim Avron, Michael Kapralov, Cameron Musco, Christopher Musco, Ameya Velingker, Amir Zandieh", "author": "Haim Avron; Michael Kapralov; Cameron Musco; Christopher Musco; Ameya Velingker; Amir Zandieh", "abstract": "Random Fourier features is one of the most popular techniques for scaling up kernel methods, such as kernel ridge regression. However, despite impressive empirical results, the statistical properties of random Fourier features are still not well understood. In this paper we take steps toward filling this gap. 
Specifically, we approach random Fourier features from a spectral matrix approximation point of view, give tight bounds on the number of Fourier features required to achieve a spectral approximation, and show how spectral matrix approximation bounds imply statistical guarantees for kernel ridge regression.", "bibtex": "@InProceedings{pmlr-v70-avron17a,\n title = \t {Random {F}ourier Features for Kernel Ridge Regression: Approximation Bounds and Statistical Guarantees},\n author = {Haim Avron and Michael Kapralov and Cameron Musco and Christopher Musco and Ameya Velingker and Amir Zandieh},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {253--262},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/avron17a/avron17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/avron17a.html},\n abstract = \t {Random Fourier features is one of the most popular techniques for scaling up kernel methods, such as kernel ridge regression. However, despite impressive empirical results, the statistical properties of random Fourier features are still not well understood. In this paper we take steps toward filling this gap. Specifically, we approach random Fourier features from a spectral matrix approximation point of view, give tight bounds on the number of Fourier features required to achieve a spectral approximation, and show how spectral matrix approximation bounds imply statistical guarantees for kernel ridge regression.}\n}", "pdf": "http://proceedings.mlr.press/v70/avron17a/avron17a.pdf", "supp": "", "pdf_size": 571120, "gs_citation": 199, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7401630811839320640&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "School of Mathematical Sciences, Tel Aviv University, Israel; School of Computer and Communication Sciences, EPFL, Switzerland; Computer Science and Artificial Intelligence Laboratory, MIT, USA; Computer Science and Artificial Intelligence Laboratory, MIT, USA; School of Computer and Communication Sciences, EPFL, Switzerland; School of Computer and Communication Sciences, EPFL, Switzerland", "aff_domain": "post.tau.ac.il;epfl.ch; ; ;epfl.ch;epfl.ch", "email": "post.tau.ac.il;epfl.ch; ; ;epfl.ch;epfl.ch", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/avron17a.html", "aff_unique_index": "0;1;2;2;1;1", "aff_unique_norm": "Tel Aviv University;EPFL;Massachusetts Institute of Technology", "aff_unique_dep": "School of Mathematical Sciences;School of Computer and Communication Sciences;Computer Science and Artificial Intelligence Laboratory", "aff_unique_url": "https://www.tau.ac.il;https://www.epfl.ch;https://web.mit.edu", "aff_unique_abbr": "TAU;EPFL;MIT", "aff_campus_unique_index": "0;2;2", "aff_campus_unique": "Tel Aviv;;Cambridge", "aff_country_unique_index": "0;1;2;2;1;1", "aff_country_unique": "Israel;Switzerland;United States" }, { "title": "Re-revisiting Learning on Hypergraphs: Confidence Interval and Subgradient Method", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/517", "id": "517", "author_site": "Chenzi Zhang, Shuguang Hu, Zhihao Gavin Tang, Hubert Chan", "author": "Chenzi Zhang; Shuguang Hu; Zhihao Gavin Tang; T-H. Hubert Chan", "abstract": "We revisit semi-supervised learning on hypergraphs. 
Same as previous approaches, our method uses a convex program whose objective function is not everywhere differentiable. We exploit the non-uniqueness of the optimal solutions, and consider confidence intervals which give the exact ranges that unlabeled vertices take in any optimal solution. Moreover, we give a much simpler approach for solving the convex program based on the subgradient method. Our experiments on real-world datasets confirm that our confidence interval approach on hypergraphs outperforms existing methods, and our sub-gradient method gives faster running times when the number of vertices is much larger than the number of edges.", "bibtex": "@InProceedings{pmlr-v70-zhang17d,\n title = \t {Re-revisiting Learning on Hypergraphs: Confidence Interval and Subgradient Method},\n author = {Chenzi Zhang and Shuguang Hu and Zhihao Gavin Tang and T-H. Hubert Chan},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {4026--4034},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zhang17d/zhang17d.pdf},\n url = \t {https://proceedings.mlr.press/v70/zhang17d.html},\n abstract = \t {We revisit semi-supervised learning on hypergraphs. Same as previous approaches, our method uses a convex program whose objective function is not everywhere differentiable. We exploit the non-uniqueness of the optimal solutions, and consider confidence intervals which give the exact ranges that unlabeled vertices take in any optimal solution. Moreover, we give a much simpler approach for solving the convex program based on the subgradient method. Our experiments on real-world datasets confirm that our confidence interval approach on hypergraphs outperforms existing methods, and our sub-gradient method gives faster running times when the number of vertices is much larger than the number of edges.}\n}", "pdf": "http://proceedings.mlr.press/v70/zhang17d/zhang17d.pdf", "supp": "", "pdf_size": 476537, "gs_citation": 66, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17327682352328648097&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "University of Hong Kong; University of Hong Kong; University of Hong Kong; University of Hong Kong", "aff_domain": "cs.hku.hk; ; ; ", "email": "cs.hku.hk; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/zhang17d.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Hong Kong", "aff_unique_dep": "", "aff_unique_url": "https://www.hku.hk", "aff_unique_abbr": "HKU", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Hong Kong SAR", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "China" }, { "title": "Real-Time Adaptive Image Compression", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/544", "id": "544", "author_site": "Oren Rippel, Lubomir Bourdev", "author": "Oren Rippel; Lubomir Bourdev", "abstract": "We present a machine learning-based approach to lossy image compression which outperforms all existing codecs, while running in real-time. Our algorithm typically produces file sizes 3 times smaller than JPEG, 2.5 times smaller than JPEG 2000, and 2.3 times smaller than WebP on datasets of generic images across a spectrum of quality levels. 
At the same time, our codec is designed to be lightweight and deployable: for example, it can encode or decode the Kodak dataset in less than 10ms per image on GPU. Our architecture is an autoencoder featuring pyramidal analysis, an adaptive coding module, and regularization of the expected codelength. We also supplement our approach with adversarial training specialized towards use in a compression setting: this enables us to produce visually pleasing reconstructions for very low bitrates.", "bibtex": "@InProceedings{pmlr-v70-rippel17a,\n title = \t {Real-Time Adaptive Image Compression},\n author = {Oren Rippel and Lubomir Bourdev},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2922--2930},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/rippel17a/rippel17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/rippel17a.html},\n abstract = \t {We present a machine learning-based approach to lossy image compression which outperforms all existing codecs, while running in real-time. Our algorithm typically produces file sizes 3 times smaller than JPEG, 2.5 times smaller than JPEG 2000, and 2.3 times smaller than WebP on datasets of generic images across a spectrum of quality levels. At the same time, our codec is designed to be lightweight and deployable: for example, it can encode or decode the Kodak dataset in less than 10ms per image on GPU. Our architecture is an autoencoder featuring pyramidal analysis, an adaptive coding module, and regularization of the expected codelength. We also supplement our approach with adversarial training specialized towards use in a compression setting: this enables us to produce visually pleasing reconstructions for very low bitrates.}\n}", "pdf": "http://proceedings.mlr.press/v70/rippel17a/rippel17a.pdf", "supp": "", "pdf_size": 3760270, "gs_citation": 692, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5970681635183362212&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "WaveOne Inc., Mountain View, CA, USA; WaveOne Inc., Mountain View, CA, USA", "aff_domain": "wave.one;wave.one", "email": "wave.one;wave.one", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/rippel17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "WaveOne Inc.", "aff_unique_dep": "", "aff_unique_url": "", "aff_unique_abbr": "", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Recovery Guarantees for One-hidden-layer Neural Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/752", "id": "752", "author_site": "Kai Zhong, Zhao Song, Prateek Jain, Peter Bartlett, Inderjit Dhillon", "author": "Kai Zhong; Zhao Song; Prateek Jain; Peter L. Bartlett; Inderjit S. Dhillon", "abstract": "In this paper, we consider regression problems with one-hidden-layer neural networks (1NNs). We distill some properties of activation functions that lead to", "bibtex": "@InProceedings{pmlr-v70-zhong17a,\n title = \t {Recovery Guarantees for One-hidden-layer Neural Networks},\n author = {Kai Zhong and Zhao Song and Prateek Jain and Peter L. Bartlett and Inderjit S. 
Dhillon},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {4140--4149},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zhong17a/zhong17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/zhong17a.html},\n abstract = \t {In this paper, we consider regression problems with one-hidden-layer neural networks (1NNs). We distill some properties of activation functions that lead to", "pdf": "http://proceedings.mlr.press/v70/zhong17a/zhong17a.pdf", "supp": "", "pdf_size": 395376, "gs_citation": 374, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11965390343759936388&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "The University of Texas at Austin; The University of Texas at Austin; Microsoft Research, India; University of California, Berkeley; The University of Texas at Austin", "aff_domain": "ices.utexas.edu;utexas.edu;microsoft.com;cs.berkeley.edu;cs.utexas.edu", "email": "ices.utexas.edu;utexas.edu;microsoft.com;cs.berkeley.edu;cs.utexas.edu", "github": "", "project": "https://arxiv.org/pdf/1706.03175", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/zhong17a.html", "aff_unique_index": "0;0;1;2;0", "aff_unique_norm": "University of Texas at Austin;Microsoft;University of California, Berkeley", "aff_unique_dep": ";Microsoft Research;", "aff_unique_url": "https://www.utexas.edu;https://www.microsoft.com/en-us/research/group/india.aspx;https://www.berkeley.edu", "aff_unique_abbr": "UT Austin;MSR India;UC Berkeley", "aff_campus_unique_index": "0;0;2;0", "aff_campus_unique": "Austin;;Berkeley", "aff_country_unique_index": "0;0;1;0;0", "aff_country_unique": "United States;India" }, { "title": "Recurrent Highway Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/656", "id": "656", "author_site": "Julian Zilly, Rupesh Srivastava, Jan Koutnik, J\u00fcrgen Schmidhuber", "author": "Julian Georg Zilly; Rupesh Kumar Srivastava; Jan Koutn\u0131\u0301k; J\u00fcrgen Schmidhuber", "abstract": "Many sequential processing tasks require complex nonlinear transition functions from one step to the next. However, recurrent neural networks with \u201cdeep\u201d transition functions remain difficult to train, even when using Long Short-Term Memory (LSTM) networks. We introduce a novel theoretical analysis of recurrent networks based on Gersgorin\u2019s circle theorem that illuminates several modeling and optimization issues and improves our understanding of the LSTM cell. Based on this analysis we propose Recurrent Highway Networks, which extend the LSTM architecture to allow step-to-step transition depths larger than one. Several language modeling experiments demonstrate that the proposed architecture results in powerful and efficient models. On the Penn Treebank corpus, solely increasing the transition depth from 1 to 10 improves word-level perplexity from 90.6 to 65.4 using the same number of parameters. 
On the larger Wikipedia datasets for character prediction (text8 and enwik8), RHNs outperform all previous results and achieve an entropy of 1.27 bits per character.", "bibtex": "@InProceedings{pmlr-v70-zilly17a,\n title = \t {Recurrent Highway Networks},\n author = {Julian Georg Zilly and Rupesh Kumar Srivastava and Jan Koutn\\'{\\i}k and J{\\\"u}rgen Schmidhuber},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {4189--4198},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zilly17a/zilly17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/zilly17a.html},\n abstract = \t {Many sequential processing tasks require complex nonlinear transition functions from one step to the next. However, recurrent neural networks with \u201cdeep\u201d transition functions remain difficult to train, even when using Long Short-Term Memory (LSTM) networks. We introduce a novel theoretical analysis of recurrent networks based on Gersgorin\u2019s circle theorem that illuminates several modeling and optimization issues and improves our understanding of the LSTM cell. Based on this analysis we propose Recurrent Highway Networks, which extend the LSTM architecture to allow step-to-step transition depths larger than one. Several language modeling experiments demonstrate that the proposed architecture results in powerful and efficient models. On the Penn Treebank corpus, solely increasing the transition depth from 1 to 10 improves word-level perplexity from 90.6 to 65.4 using the same number of parameters. On the larger Wikipedia datasets for character prediction (text8 and enwik8), RHNs outperform all previous results and achieve an entropy of 1.27 bits per character.}\n}", "pdf": "http://proceedings.mlr.press/v70/zilly17a/zilly17a.pdf", "supp": "", "pdf_size": 1722753, "gs_citation": 573, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13421244339578314709&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "ETH Z\u00fcrich, Switzerland+The Swiss AI Lab IDSIA (USI-SUPSI) & NNAISENSE, Switzerland; The Swiss AI Lab IDSIA (USI-SUPSI) & NNAISENSE, Switzerland; The Swiss AI Lab IDSIA (USI-SUPSI) & NNAISENSE, Switzerland; The Swiss AI Lab IDSIA (USI-SUPSI) & NNAISENSE, Switzerland", "aff_domain": "ethz.ch;idsia.ch; ; ", "email": "ethz.ch;idsia.ch; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/zilly17a.html", "aff_unique_index": "0+1;1;1;1", "aff_unique_norm": "ETH Zurich;Swiss AI Lab IDSIA", "aff_unique_dep": ";AI Lab", "aff_unique_url": "https://www.ethz.ch;https://www.idsia.ch/", "aff_unique_abbr": "ETHZ;IDSIA", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0;0", "aff_country_unique": "Switzerland" }, { "title": "Recursive Partitioning for Personalization using Observational Data", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/818", "id": "818", "author": "Nathan Kallus", "abstract": "We study the problem of learning to choose from $m$ discrete treatment options (e.g., news item or medical drug) the one with best causal effect for a particular instance (e.g., user or patient) where the training data consists of passive observations of covariates, treatment, and the outcome of the treatment. 
The standard approach to this problem is regress and compare: split the training data by treatment, fit a regression model in each split, and, for a new instance, predict all $m$ outcomes and pick the best. By reformulating the problem as a single learning task rather than $m$ separate ones, we propose a new approach based on recursively partitioning the data into regimes where different treatments are optimal. We extend this approach to an optimal partitioning approach that finds a globally optimal partition, achieving a compact, interpretable, and impactful personalization model. We develop new tools for validating and evaluating personalization models on observational data and use these to demonstrate the power of our novel approaches in a personalized medicine and a job training application.", "bibtex": "@InProceedings{pmlr-v70-kallus17a,\n title = \t {Recursive Partitioning for Personalization using Observational Data},\n author = {Nathan Kallus},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1789--1798},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/kallus17a/kallus17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/kallus17a.html},\n abstract = \t {We study the problem of learning to choose from $m$ discrete treatment options (e.g., news item or medical drug) the one with best causal effect for a particular instance (e.g., user or patient) where the training data consists of passive observations of covariates, treatment, and the outcome of the treatment. The standard approach to this problem is regress and compare: split the training data by treatment, fit a regression model in each split, and, for a new instance, predict all $m$ outcomes and pick the best. By reformulating the problem as a single learning task rather than $m$ separate ones, we propose a new approach based on recursively partitioning the data into regimes where different treatments are optimal. We extend this approach to an optimal partitioning approach that finds a globally optimal partition, achieving a compact, interpretable, and impactful personalization model. 
We develop new tools for validating and evaluating personalization models on observational data and use these to demonstrate the power of our novel approaches in a personalized medicine and a job training application.}\n}", "pdf": "http://proceedings.mlr.press/v70/kallus17a/kallus17a.pdf", "supp": "", "pdf_size": 461635, "gs_citation": 118, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5874958531541631320&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "School of Operations Research and Information Engineering and Cornell Tech, Cornell University", "aff_domain": "cornell.edu", "email": "cornell.edu", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v70/kallus17a.html", "aff_unique_index": "0", "aff_unique_norm": "Cornell University", "aff_unique_dep": "School of Operations Research and Information Engineering", "aff_unique_url": "https://www.cornell.edu", "aff_unique_abbr": "Cornell", "aff_campus_unique_index": "0", "aff_campus_unique": "Ithaca", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "title": "Reduced Space and Faster Convergence in Imperfect-Information Games via Pruning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/862", "id": "862", "author_site": "Noam Brown, Tuomas Sandholm", "author": "Noam Brown; Tuomas Sandholm", "abstract": "Iterative algorithms such as Counterfactual Regret Minimization (CFR) are the most popular way to solve large zero-sum imperfect-information games. In this paper we introduce Best-Response Pruning (BRP), an improvement to iterative algorithms such as CFR that allows poorly-performing actions to be temporarily pruned. We prove that when using CFR in zero-sum games, adding BRP will asymptotically prune any action that is not part of a best response to some Nash equilibrium. This leads to provably faster convergence and lower space requirements. Experiments show that BRP results in a factor of 7 reduction in space, and the reduction factor increases with game size.", "bibtex": "@InProceedings{pmlr-v70-brown17a,\n title = \t {Reduced Space and Faster Convergence in Imperfect-Information Games via Pruning},\n author = {Noam Brown and Tuomas Sandholm},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {596--604},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/brown17a/brown17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/brown17a.html},\n abstract = \t {Iterative algorithms such as Counterfactual Regret Minimization (CFR) are the most popular way to solve large zero-sum imperfect-information games. In this paper we introduce Best-Response Pruning (BRP), an improvement to iterative algorithms such as CFR that allows poorly-performing actions to be temporarily pruned. We prove that when using CFR in zero-sum games, adding BRP will asymptotically prune any action that is not part of a best response to some Nash equilibrium. This leads to provably faster convergence and lower space requirements. 
Experiments show that BRP results in a factor of 7 reduction in space, and the reduction factor increases with game size.}\n}", "pdf": "http://proceedings.mlr.press/v70/brown17a/brown17a.pdf", "supp": "", "pdf_size": 516608, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13044365139694853567&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Computer Science Department, Carnegie Mellon University, Pittsburgh, PA, USA; Computer Science Department, Carnegie Mellon University, Pittsburgh, PA, USA", "aff_domain": "cs.cmu.edu;cs.cmu.edu", "email": "cs.cmu.edu;cs.cmu.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/brown17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "Computer Science Department", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Pittsburgh", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Regret Minimization in Behaviorally-Constrained Zero-Sum Games", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/501", "id": "501", "author_site": "Gabriele Farina, Christian Kroer, Tuomas Sandholm", "author": "Gabriele Farina; Christian Kroer; Tuomas Sandholm", "abstract": "No-regret learning has emerged as a powerful tool for solving extensive-form games. This was facilitated by the counterfactual-regret minimization (CFR) framework, which relies on the instantiation of regret minimizers for simplexes at each information set of the game. We use an instantiation of the CFR framework to develop algorithms for solving behaviorally-constrained (and, as a special case, perturbed in the Selten sense) extensive-form games, which allows us to compute approximate Nash equilibrium refinements. Nash equilibrium refinements are motivated by a major deficiency in Nash equilibrium: it provides virtually no guarantees on how it will play in parts of the game tree that are reached with zero probability. Refinements can mend this issue, but have not been adopted in practice, mostly due to a lack of scalable algorithms. We show that, compared to standard algorithms, our method finds solutions that have substantially better refinement properties, while enjoying a convergence rate that is comparable to that of state-of-the-art algorithms for Nash equilibrium computation both in theory and practice.", "bibtex": "@InProceedings{pmlr-v70-farina17a,\n title = \t {Regret Minimization in Behaviorally-Constrained Zero-Sum Games},\n author = {Gabriele Farina and Christian Kroer and Tuomas Sandholm},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1107--1116},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/farina17a/farina17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/farina17a.html},\n abstract = \t {No-regret learning has emerged as a powerful tool for solving extensive-form games. This was facilitated by the counterfactual-regret minimization (CFR) framework, which relies on the instantiation of regret minimizers for simplexes at each information set of the game. 
We use an instantiation of the CFR framework to develop algorithms for solving behaviorally-constrained (and, as a special case, perturbed in the Selten sense) extensive-form games, which allows us to compute approximate Nash equilibrium refinements. Nash equilibrium refinements are motivated by a major deficiency in Nash equilibrium: it provides virtually no guarantees on how it will play in parts of the game tree that are reached with zero probability. Refinements can mend this issue, but have not been adopted in practice, mostly due to a lack of scalable algorithms. We show that, compared to standard algorithms, our method finds solutions that have substantially better refinement properties, while enjoying a convergence rate that is comparable to that of state-of-the-art algorithms for Nash equilibrium computation both in theory and practice.}\n}", "pdf": "http://proceedings.mlr.press/v70/farina17a/farina17a.pdf", "supp": "", "pdf_size": 307838, "gs_citation": 36, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12741599394656804424&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Carnegie Mellon University; Carnegie Mellon University; Carnegie Mellon University", "aff_domain": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", "email": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/farina17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Regularising Non-linear Models Using Feature Side-information", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/758", "id": "758", "author_site": "maolaaisha aminanmu, Pablo Strasser, Alexandros Kalousis", "author": "Amina Mollaysa; Pablo Strasser; Alexandros Kalousis", "abstract": "Very often features come with their own vectorial descriptions which provide detailed information about their properties. We refer to these vectorial descriptions as feature side-information. In the standard learning scenario, input is represented as a vector of features and the feature side-information is most often ignored or used only for feature selection prior to model fitting. We believe that feature side-information which carries information about features intrinsic property will help improve model prediction if used in a proper way during learning process. In this paper, we propose a framework that allows for the incorporation of the feature side-information during the learning of very general model families to improve the prediction performance. We control the structures of the learned models so that they reflect features\u2019 similarities as these are defined on the basis of the side-information. 
We perform experiments on a number of benchmark datasets which show significant predictive performance gains, over a number of baselines, as a result of the exploitation of the side-information.", "bibtex": "@InProceedings{pmlr-v70-mollaysa17a,\n title = \t {Regularising Non-linear Models Using Feature Side-information},\n author = {Amina Mollaysa and Pablo Strasser and Alexandros Kalousis},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2508--2517},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/mollaysa17a/mollaysa17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/mollaysa17a.html},\n abstract = \t {Very often features come with their own vectorial descriptions which provide detailed information about their properties. We refer to these vectorial descriptions as feature side-information. In the standard learning scenario, input is represented as a vector of features and the feature side-information is most often ignored or used only for feature selection prior to model fitting. We believe that feature side-information which carries information about features intrinsic property will help improve model prediction if used in a proper way during learning process. In this paper, we propose a framework that allows for the incorporation of the feature side-information during the learning of very general model families to improve the prediction performance. We control the structures of the learned models so that they reflect features\u2019 similarities as these are defined on the basis of the side-information. We perform experiments on a number of benchmark datasets which show significant predictive performance gains, over a number of baselines, as a result of the exploitation of the side-information.}\n}", "pdf": "http://proceedings.mlr.press/v70/mollaysa17a/mollaysa17a.pdf", "supp": "", "pdf_size": 645834, "gs_citation": 17, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1943783147549896982&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "University of Applied Sciences, Western Switzerland + University of Geneva; University of Applied Sciences, Western Switzerland + University of Geneva; University of Applied Sciences, Western Switzerland + University of Geneva", "aff_domain": "hesge.ch;hesge.ch;hesge.ch", "email": "hesge.ch;hesge.ch;hesge.ch", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/mollaysa17a.html", "aff_unique_index": "0+1;0+1;0+1", "aff_unique_norm": "University of Applied Sciences Western Switzerland;University of Geneva", "aff_unique_dep": ";", "aff_unique_url": "https://www.hes-so.ch/en;https://www.unige.ch", "aff_unique_abbr": "HES-SO;UNIGE", "aff_campus_unique_index": ";;", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0+0;0+0", "aff_country_unique": "Switzerland" }, { "title": "Reinforcement Learning with Deep Energy-Based Policies", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/838", "id": "838", "author_site": "Tuomas Haarnoja, Haoran Tang, Pieter Abbeel, Sergey Levine", "author": "Tuomas Haarnoja; Haoran Tang; Pieter Abbeel; Sergey Levine", "abstract": "We propose a method for learning expressive energy-based policies for continuous states and actions, which has been feasible only in tabular domains before. 
We apply our method to learning maximum entropy policies, resulting in a new algorithm, called soft Q-learning, that expresses the optimal policy via a Boltzmann distribution. We use the recently proposed amortized Stein variational gradient descent to learn a stochastic sampling network that approximates samples from this distribution. The benefits of the proposed algorithm include improved exploration and compositionality that allows transferring skills between tasks, which we confirm in simulated experiments with swimming and walking robots. We also draw a connection to actor-critic methods, which can be viewed as performing approximate inference on the corresponding energy-based model.", "bibtex": "@InProceedings{pmlr-v70-haarnoja17a,\n title = \t {Reinforcement Learning with Deep Energy-Based Policies},\n author = {Tuomas Haarnoja and Haoran Tang and Pieter Abbeel and Sergey Levine},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1352--1361},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/haarnoja17a/haarnoja17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/haarnoja17a.html},\n abstract = \t {We propose a method for learning expressive energy-based policies for continuous states and actions, which has been feasible only in tabular domains before. We apply our method to learning maximum entropy policies, resulting in a new algorithm, called soft Q-learning, that expresses the optimal policy via a Boltzmann distribution. We use the recently proposed amortized Stein variational gradient descent to learn a stochastic sampling network that approximates samples from this distribution. The benefits of the proposed algorithm include improved exploration and compositionality that allows transferring skills between tasks, which we confirm in simulated experiments with swimming and walking robots. 
We also draw a connection to actor-critic methods, which can be viewed as performing approximate inference on the corresponding energy-based model.}\n}", "pdf": "http://proceedings.mlr.press/v70/haarnoja17a/haarnoja17a.pdf", "supp": "", "pdf_size": 2284223, "gs_citation": 1695, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10187244454208251417&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "UC Berkeley, Department of Electrical Engineering and Computer Sciences+OpenAI+International Computer Science Institute; UC Berkeley, Department of Mathematics; UC Berkeley, Department of Electrical Engineering and Computer Sciences+OpenAI+International Computer Science Institute; UC Berkeley, Department of Electrical Engineering and Computer Sciences+OpenAI+International Computer Science Institute", "aff_domain": "berkeley.edu;math.berkeley.edu; ; ", "email": "berkeley.edu;math.berkeley.edu; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/haarnoja17a.html", "aff_unique_index": "0+1+2;0;0+1+2;0+1+2", "aff_unique_norm": "University of California, Berkeley;OpenAI;International Computer Science Institute", "aff_unique_dep": "Department of Electrical Engineering and Computer Sciences;;", "aff_unique_url": "https://www.berkeley.edu;https://openai.com;https://www.icsi.berkeley.edu/", "aff_unique_abbr": "UC Berkeley;OpenAI;ICSI", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Berkeley;", "aff_country_unique_index": "0+0+0;0;0+0+0;0+0+0", "aff_country_unique": "United States" }, { "title": "Relative Fisher Information and Natural Gradient for Learning Large Modular Models", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/458", "id": "458", "author_site": "Ke Sun, Frank Nielsen", "author": "Ke Sun; Frank Nielsen", "abstract": "Fisher information and natural gradient provided deep insights and powerful tools to artificial neural networks. However, related analysis becomes more and more difficult as the learner\u2019s structure turns large and complex. This paper makes a preliminary step towards a new direction. We extract a local component from a large neural system, and define its relative Fisher information metric that describes accurately this small component, and is invariant to the other parts of the system. This concept is important because the geometry structure is much simplified and it can be easily applied to guide the learning of neural networks. We provide an analysis on a list of commonly used components, and demonstrate how to use this concept to further improve optimization.", "bibtex": "@InProceedings{pmlr-v70-sun17b,\n title = \t {Relative {F}isher Information and Natural Gradient for Learning Large Modular Models},\n author = {Ke Sun and Frank Nielsen},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3289--3298},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/sun17b/sun17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/sun17b.html},\n abstract = \t {Fisher information and natural gradient provided deep insights and powerful tools to artificial neural networks. However, related analysis becomes more and more difficult as the learner\u2019s structure turns large and complex. This paper makes a preliminary step towards a new direction. 
We extract a local component from a large neural system, and define its relative Fisher information metric that describes accurately this small component, and is invariant to the other parts of the system. This concept is important because the geometry structure is much simplified and it can be easily applied to guide the learning of neural networks. We provide an analysis on a list of commonly used components, and demonstrate how to use this concept to further improve optimization.}\n}", "pdf": "http://proceedings.mlr.press/v70/sun17b/sun17b.pdf", "supp": "", "pdf_size": 1367852, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13655004352618309760&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "King Abdullah University of Science and Technology (KAUST), Saudi Arabia; \u00c9cole Polytechnique, France + Sony Computer Science Laboratories Inc., Japan", "aff_domain": "ieee.org;acm.org", "email": "ieee.org;acm.org", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/sun17b.html", "aff_unique_index": "0;1+2", "aff_unique_norm": "King Abdullah University of Science and Technology;Ecole Polytechnique;Sony Computer Science Laboratories Inc.", "aff_unique_dep": ";;", "aff_unique_url": "https://www.kaust.edu.sa;https://www.ec-polytechnique.fr;https://www.sony.net/", "aff_unique_abbr": "KAUST;X;Sony CSL", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1+2", "aff_country_unique": "Saudi Arabia;France;Japan" }, { "title": "Resource-efficient Machine Learning in 2 KB RAM for the Internet of Things", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/696", "id": "696", "author_site": "Ashish Kumar, Saurabh Goyal, Manik Varma", "author": "Ashish Kumar; Saurabh Goyal; Manik Varma", "abstract": "This paper develops a novel tree-based algorithm, called Bonsai, for efficient prediction on IoT devices \u2013 such as those based on the Arduino Uno board having an 8 bit ATmega328P microcontroller operating at 16 MHz with no native floating point support, 2 KB RAM and 32 KB read-only flash. Bonsai maintains prediction accuracy while minimizing model size and prediction costs by: (a) developing a tree model which learns a single, shallow, sparse tree with powerful nodes; (b) sparsely projecting all data into a low-dimensional space in which the tree is learnt; and (c) jointly learning all tree and projection parameters. Experimental results on multiple benchmark datasets demonstrate that Bonsai can make predictions in milliseconds even on slow microcontrollers, can fit in KB of memory, has lower battery consumption than all other algorithms while achieving prediction accuracies that can be as much as 30\\% higher than state-of-the-art methods for resource-efficient machine learning. Bonsai is also shown to generalize to other resource constrained settings beyond IoT by generating significantly better search results as compared to Bing\u2019s L3 ranker when the model size is restricted to 300 bytes. 
Bonsai\u2019s code can be downloaded from (http://www.manikvarma.org/code/Bonsai/download.html).", "bibtex": "@InProceedings{pmlr-v70-kumar17a,\n title = \t {Resource-efficient Machine Learning in 2 {KB} {RAM} for the Internet of Things},\n author = {Ashish Kumar and Saurabh Goyal and Manik Varma},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1935--1944},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/kumar17a/kumar17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/kumar17a.html},\n abstract = \t {This paper develops a novel tree-based algorithm, called Bonsai, for efficient prediction on IoT devices \u2013 such as those based on the Arduino Uno board having an 8 bit ATmega328P microcontroller operating at 16 MHz with no native floating point support, 2 KB RAM and 32 KB read-only flash. Bonsai maintains prediction accuracy while minimizing model size and prediction costs by: (a) developing a tree model which learns a single, shallow, sparse tree with powerful nodes; (b) sparsely projecting all data into a low-dimensional space in which the tree is learnt; and (c) jointly learning all tree and projection parameters. Experimental results on multiple benchmark datasets demonstrate that Bonsai can make predictions in milliseconds even on slow microcontrollers, can fit in KB of memory, has lower battery consumption than all other algorithms while achieving prediction accuracies that can be as much as 30\\% higher than state-of-the-art methods for resource-efficient machine learning. Bonsai is also shown to generalize to other resource constrained settings beyond IoT by generating significantly better search results as compared to Bing\u2019s L3 ranker when the model size is restricted to 300 bytes. Bonsai\u2019s code can be downloaded from (http://www.manikvarma.org/code/Bonsai/download.html).}\n}", "pdf": "http://proceedings.mlr.press/v70/kumar17a/kumar17a.pdf", "supp": "", "pdf_size": 365135, "gs_citation": 317, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15338487202415461466&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 13, "aff": "Microsoft Research, Bangalore, India; CSE Department, IIT Delhi, India; Microsoft Research, Bangalore, India", "aff_domain": "microsoft.com;cse.iitd.ac.in;microsoft.com", "email": "microsoft.com;cse.iitd.ac.in;microsoft.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/kumar17a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Microsoft;Indian Institute of Technology Delhi", "aff_unique_dep": "Microsoft Research;Computer Science and Engineering", "aff_unique_url": "https://www.microsoft.com/en-us/research/group/microsoft-research-india;https://www.iitdelhi.ac.in", "aff_unique_abbr": "MSR;IIT Delhi", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Bangalore;Delhi", "aff_country_unique_index": "0;0;0", "aff_country_unique": "India" }, { "title": "Risk Bounds for Transferring Representations With and Without Fine-Tuning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/869", "id": "869", "author_site": "Daniel McNamara, Nina Balcan", "author": "Daniel McNamara; Maria-Florina Balcan", "abstract": "A popular machine learning strategy is the transfer of a representation (i.e. 
a feature extraction function) learned on a source task to a target task. Examples include the re-use of neural network weights or word embeddings. We develop sufficient conditions for the success of this approach. If the representation learned from the source task is fixed, we identify conditions on how the tasks relate to obtain an upper bound on target task risk via a VC dimension-based argument. We then consider using the representation from the source task to construct a prior, which is fine-tuned using target task data. We give a PAC-Bayes target task risk bound in this setting under suitable conditions. We show examples of our bounds using feedforward neural networks. Our results motivate a practical approach to weight transfer, which we validate with experiments.", "bibtex": "@InProceedings{pmlr-v70-mcnamara17a,\n title = \t {Risk Bounds for Transferring Representations With and Without Fine-Tuning},\n author = {Daniel McNamara and Maria-Florina Balcan},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2373--2381},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/mcnamara17a/mcnamara17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/mcnamara17a.html},\n abstract = \t {A popular machine learning strategy is the transfer of a representation (i.e. a feature extraction function) learned on a source task to a target task. Examples include the re-use of neural network weights or word embeddings. We develop sufficient conditions for the success of this approach. If the representation learned from the source task is fixed, we identify conditions on how the tasks relate to obtain an upper bound on target task risk via a VC dimension-based argument. We then consider using the representation from the source task to construct a prior, which is fine-tuned using target task data. We give a PAC-Bayes target task risk bound in this setting under suitable conditions. We show examples of our bounds using feedforward neural networks. 
Our results motivate a practical approach to weight transfer, which we validate with experiments.}\n}", "pdf": "http://proceedings.mlr.press/v70/mcnamara17a/mcnamara17a.pdf", "supp": "", "pdf_size": 436270, "gs_citation": 46, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17597017543963587196&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "The Australian National University and Data61, Canberra, ACT, Australia; Carnegie Mellon University, Pittsburgh, PA, USA", "aff_domain": "anu.edu.au; ", "email": "anu.edu.au; ", "github": "", "project": "http://code.google.com/archive/p/word2vec; http://caffe.berkeleyvision.org/model_zoo; http://vlfeat.org/matconvnet/pretrained", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/mcnamara17a.html", "aff_unique_index": "0;1", "aff_unique_norm": "Australian National University;Carnegie Mellon University", "aff_unique_dep": ";", "aff_unique_url": "https://www.anu.edu.au;https://www.cmu.edu", "aff_unique_abbr": "ANU;CMU", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Canberra;Pittsburgh", "aff_country_unique_index": "0;1", "aff_country_unique": "Australia;United States" }, { "title": "Robust Adversarial Reinforcement Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/754", "id": "754", "author_site": "Lerrel Pinto, James Davidson, Rahul Sukthankar, Abhinav Gupta", "author": "Lerrel Pinto; James Davidson; Rahul Sukthankar; Abhinav Gupta", "abstract": "Deep neural networks coupled with fast simulation and improved computational speeds have led to recent successes in the field of reinforcement learning (RL). However, most current RL-based approaches fail to generalize since: (a) the gap between simulation and real world is so large that policy-learning approaches fail to transfer; (b) even if policy learning is done in real world, the data scarcity leads to failed generalization from training to test scenarios (e.g., due to different friction or object masses). Inspired from H-infinity control methods, we note that both modeling errors and differences in training and test scenarios can just be viewed as extra forces/disturbances in the system. This paper proposes the idea of robust adversarial reinforcement learning (RARL), where we train an agent to operate in the presence of a destabilizing adversary that applies disturbance forces to the system. The jointly trained adversary is reinforced \u2013 that is, it learns an optimal destabilization policy. We formulate the policy learning as a zero-sum, minimax objective function. 
Extensive experiments in multiple environments (InvertedPendulum, HalfCheetah, Swimmer, Hopper, Walker2d and Ant) conclusively demonstrate that our method (a) improves training stability; (b) is robust to differences in training/test conditions; and c) outperform the baseline even in the absence of the adversary.", "bibtex": "@InProceedings{pmlr-v70-pinto17a,\n title = \t {Robust Adversarial Reinforcement Learning},\n author = {Lerrel Pinto and James Davidson and Rahul Sukthankar and Abhinav Gupta},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2817--2826},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/pinto17a/pinto17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/pinto17a.html},\n abstract = \t {Deep neural networks coupled with fast simulation and improved computational speeds have led to recent successes in the field of reinforcement learning (RL). However, most current RL-based approaches fail to generalize since: (a) the gap between simulation and real world is so large that policy-learning approaches fail to transfer; (b) even if policy learning is done in real world, the data scarcity leads to failed generalization from training to test scenarios (e.g., due to different friction or object masses). Inspired from H-infinity control methods, we note that both modeling errors and differences in training and test scenarios can just be viewed as extra forces/disturbances in the system. This paper proposes the idea of robust adversarial reinforcement learning (RARL), where we train an agent to operate in the presence of a destabilizing adversary that applies disturbance forces to the system. The jointly trained adversary is reinforced \u2013 that is, it learns an optimal destabilization policy. We formulate the policy learning as a zero-sum, minimax objective function. 
Extensive experiments in multiple environments (InvertedPendulum, HalfCheetah, Swimmer, Hopper, Walker2d and Ant) conclusively demonstrate that our method (a) improves training stability; (b) is robust to differences in training/test conditions; and c) outperform the baseline even in the absence of the adversary.}\n}", "pdf": "http://proceedings.mlr.press/v70/pinto17a/pinto17a.pdf", "supp": "", "pdf_size": 2748996, "gs_citation": 1161, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10521359398547093876&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 7, "aff": "Carnegie Mellon University; Google Brain; Google Research; Carnegie Mellon University + Google Research", "aff_domain": "cs.cmu.edu; ; ; ", "email": "cs.cmu.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/pinto17a.html", "aff_unique_index": "0;1;1;0+1", "aff_unique_norm": "Carnegie Mellon University;Google", "aff_unique_dep": ";Google Brain", "aff_unique_url": "https://www.cmu.edu;https://brain.google.com", "aff_unique_abbr": "CMU;Google Brain", "aff_campus_unique_index": "1;1;1", "aff_campus_unique": ";Mountain View", "aff_country_unique_index": "0;0;0;0+0", "aff_country_unique": "United States" }, { "title": "Robust Budget Allocation via Continuous Submodular Functions", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/667", "id": "667", "author_site": "Matthew J Staib, Stefanie Jegelka", "author": "Matthew Staib; Stefanie Jegelka", "abstract": "The optimal allocation of resources for maximizing influence, spread of information or coverage, has gained attention in the past years, in particular in machine learning and data mining. But in applications, the parameters of the problem are rarely known exactly, and using wrong parameters can lead to undesirable outcomes. We hence revisit a continuous version of the Budget Allocation or Bipartite Influence Maximization problem introduced by Alon et al. (2012) from a robust optimization perspective, where an adversary may choose the least favorable parameters within a confidence set. The resulting problem is a nonconvex-concave saddle point problem (or game). We show that this nonconvex problem can be solved exactly by leveraging connections to continuous submodular functions, and by solving a constrained submodular minimization problem. Although constrained submodular minimization is hard in general, here, we establish conditions under which such a problem can be solved to arbitrary precision $\\epsilon$.", "bibtex": "@InProceedings{pmlr-v70-staib17a,\n title = \t {Robust Budget Allocation via Continuous Submodular Functions},\n author = {Matthew Staib and Stefanie Jegelka},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3230--3240},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/staib17a/staib17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/staib17a.html},\n abstract = \t {The optimal allocation of resources for maximizing influence, spread of information or coverage, has gained attention in the past years, in particular in machine learning and data mining. But in applications, the parameters of the problem are rarely known exactly, and using wrong parameters can lead to undesirable outcomes. 
We hence revisit a continuous version of the Budget Allocation or Bipartite Influence Maximization problem introduced by Alon et al. (2012) from a robust optimization perspective, where an adversary may choose the least favorable parameters within a confidence set. The resulting problem is a nonconvex-concave saddle point problem (or game). We show that this nonconvex problem can be solved exactly by leveraging connections to continuous submodular functions, and by solving a constrained submodular minimization problem. Although constrained submodular minimization is hard in general, here, we establish conditions under which such a problem can be solved to arbitrary precision $\\epsilon$.}\n}", "pdf": "http://proceedings.mlr.press/v70/staib17a/staib17a.pdf", "supp": "", "pdf_size": 736706, "gs_citation": 62, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3637381770369787981&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Massachusetts Institute of Technology; Massachusetts Institute of Technology", "aff_domain": "mit.edu;mit.edu", "email": "mit.edu;mit.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/staib17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "", "aff_unique_url": "https://web.mit.edu", "aff_unique_abbr": "MIT", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Robust Gaussian Graphical Model Estimation with Arbitrary Corruption", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/718", "id": "718", "author_site": "Lingxiao Wang, Quanquan Gu", "author": "Lingxiao Wang; Quanquan Gu", "abstract": "We study the problem of estimating the high-dimensional Gaussian graphical model where the data are arbitrarily corrupted. We propose a robust estimator for the sparse precision matrix in the high-dimensional regime. At the core of our method is a robust covariance matrix estimator, which is based on truncated inner product. We establish the statistical guarantee of our estimator on both estimation error and model selection consistency. In particular, we show that provided that the number of corrupted samples $n_2$ for each variable satisfies $n_2 \\lesssim \\sqrt{n}/\\sqrt{\\log d}$, where $n$ is the sample size and $d$ is the number of variables, the proposed robust precision matrix estimator attains the same statistical rate as the standard estimator for Gaussian graphical models. In addition, we propose a hypothesis testing procedure to assess the uncertainty of our robust estimator. 
We demonstrate the effectiveness of our method through extensive experiments on both synthetic data and real-world genomic data.", "bibtex": "@InProceedings{pmlr-v70-wang17d,\n title = \t {Robust {G}aussian Graphical Model Estimation with Arbitrary Corruption},\n author = {Lingxiao Wang and Quanquan Gu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3617--3626},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/wang17d/wang17d.pdf},\n url = \t {https://proceedings.mlr.press/v70/wang17d.html},\n abstract = \t {We study the problem of estimating the high-dimensional Gaussian graphical model where the data are arbitrarily corrupted. We propose a robust estimator for the sparse precision matrix in the high-dimensional regime. At the core of our method is a robust covariance matrix estimator, which is based on truncated inner product. We establish the statistical guarantee of our estimator on both estimation error and model selection consistency. In particular, we show that provided that the number of corrupted samples $n_2$ for each variable satisfies $n_2 \\lesssim \\sqrt{n}/\\sqrt{\\log d}$, where $n$ is the sample size and $d$ is the number of variables, the proposed robust precision matrix estimator attains the same statistical rate as the standard estimator for Gaussian graphical models. In addition, we propose a hypothesis testing procedure to assess the uncertainty of our robust estimator. We demonstrate the effectiveness of our method through extensive experiments on both synthetic data and real-world genomic data.}\n}", "pdf": "http://proceedings.mlr.press/v70/wang17d/wang17d.pdf", "supp": "", "pdf_size": 876182, "gs_citation": 15, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16358039482817668080&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science, University of Virginia, Charlottesville, Virginia, USA; Department of Computer Science, University of Virginia, Charlottesville, Virginia, USA", "aff_domain": "virginia.edu;virginia.edu", "email": "virginia.edu;virginia.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/wang17d.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Virginia", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.virginia.edu", "aff_unique_abbr": "UVA", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Charlottesville", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Robust Guarantees of Stochastic Greedy Algorithms", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/475", "id": "475", "author_site": "Yaron Singer, Avinatan Hassidim", "author": "Avinatan Hassidim; Yaron Singer", "abstract": "In this paper we analyze the robustness of stochastic variants of the greedy algorithm for submodular maximization. Our main result shows that for maximizing a monotone submodular function under a cardinality constraint, iteratively selecting an element whose marginal contribution is approximately maximal in expectation is a sufficient condition to obtain the optimal approximation guarantee with exponentially high probability, assuming the cardinality is sufficiently large. 
One consequence of our result is that the linear-time STOCHASTIC-GREEDY algorithm recently proposed in (Mirzasoleiman et al.,2015) achieves the optimal running time while maintaining an optimal approximation guarantee. We also show that high probability guarantees cannot be obtained for stochastic greedy algorithms under matroid constraints, and prove an approximation guarantee which holds in expectation. In contrast to the guarantees of the greedy algorithm, we show that the approximation ratio of stochastic local search is arbitrarily bad, with high probability, as well as in expectation.", "bibtex": "@InProceedings{pmlr-v70-hassidim17a,\n title = \t {Robust Guarantees of Stochastic Greedy Algorithms},\n author = {Avinatan Hassidim and Yaron Singer},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1424--1432},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/hassidim17a/hassidim17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/hassidim17a.html},\n abstract = \t {In this paper we analyze the robustness of stochastic variants of the greedy algorithm for submodular maximization. Our main result shows that for maximizing a monotone submodular function under a cardinality constraint, iteratively selecting an element whose marginal contribution is approximately maximal in expectation is a sufficient condition to obtain the optimal approximation guarantee with exponentially high probability, assuming the cardinality is sufficiently large. One consequence of our result is that the linear-time STOCHASTIC-GREEDY algorithm recently proposed in (Mirzasoleiman et al.,2015) achieves the optimal running time while maintaining an optimal approximation guarantee. We also show that high probability guarantees cannot be obtained for stochastic greedy algorithms under matroid constraints, and prove an approximation guarantee which holds in expectation. In contrast to the guarantees of the greedy algorithm, we show that the approximation ratio of stochastic local search is arbitrarily bad, with high probability, as well as in expectation.}\n}", "pdf": "http://proceedings.mlr.press/v70/hassidim17a/hassidim17a.pdf", "supp": "", "pdf_size": 340493, "gs_citation": 19, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2900101234632587277&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Bar Ilan University and Google; Harvard University", "aff_domain": "cs.biu.ac.il;seas.harvard.edu", "email": "cs.biu.ac.il;seas.harvard.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/hassidim17a.html", "aff_unique_index": "0;1", "aff_unique_norm": "Bar-Ilan University;Harvard University", "aff_unique_dep": ";", "aff_unique_url": "https://www.biu.ac.il;https://www.harvard.edu", "aff_unique_abbr": "BIU;Harvard", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1", "aff_country_unique": "Israel;United States" }, { "title": "Robust Probabilistic Modeling with Bayesian Data Reweighting", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/471", "id": "471", "author_site": "Yixin Wang, Alp Kucukelbir, David Blei", "author": "Yixin Wang; Alp Kucukelbir; David M. Blei", "abstract": "Probabilistic models analyze data by relying on a set of assumptions. 
Data that exhibit deviations from these assumptions can undermine inference and prediction quality. Robust models offer protection against mismatch between a model\u2019s assumptions and reality. We propose a way to systematically detect and mitigate mismatch of a large class of probabilistic models. The idea is to raise the likelihood of each observation to a weight and then to infer both the latent variables and the weights from data. Inferring the weights allows a model to identify observations that match its assumptions and down-weight others. This enables robust inference and improves predictive accuracy. We study four different forms of mismatch with reality, ranging from missing latent groups to structure misspecification. A Poisson factorization analysis of the Movielens 1M dataset shows the benefits of this approach in a practical scenario.", "bibtex": "@InProceedings{pmlr-v70-wang17g,\n title = \t {Robust Probabilistic Modeling with {B}ayesian Data Reweighting},\n author = {Yixin Wang and Alp Kucukelbir and David M. Blei},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3646--3655},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/wang17g/wang17g.pdf},\n url = \t {https://proceedings.mlr.press/v70/wang17g.html},\n abstract = \t {Probabilistic models analyze data by relying on a set of assumptions. Data that exhibit deviations from these assumptions can undermine inference and prediction quality. Robust models offer protection against mismatch between a model\u2019s assumptions and reality. We propose a way to systematically detect and mitigate mismatch of a large class of probabilistic models. The idea is to raise the likelihood of each observation to a weight and then to infer both the latent variables and the weights from data. Inferring the weights allows a model to identify observations that match its assumptions and down-weight others. This enables robust inference and improves predictive accuracy. We study four different forms of mismatch with reality, ranging from missing latent groups to structure misspecification. 
A Poisson factorization analysis of the Movielens 1M dataset shows the benefits of this approach in a practical scenario.}\n}", "pdf": "http://proceedings.mlr.press/v70/wang17g/wang17g.pdf", "supp": "", "pdf_size": 590519, "gs_citation": 115, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6759406622668064116&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Columbia University; Columbia University; Columbia University", "aff_domain": "columbia.edu; ; ", "email": "columbia.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/wang17g.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Columbia University", "aff_unique_dep": "", "aff_unique_url": "https://www.columbia.edu", "aff_unique_abbr": "Columbia", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Robust Structured Estimation with Single-Index Models", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/828", "id": "828", "author_site": "Sheng Chen, Arindam Banerjee", "author": "Sheng Chen; Arindam Banerjee", "abstract": "In this paper, we investigate general single-index models (SIMs) in high dimensions. Based on U-statistics, we propose two types of robust estimators for the recovery of model parameters, which can be viewed as generalizations of several existing algorithms for one-bit compressed sensing (1-bit CS). With minimal assumption on noise, the statistical guarantees are established for the generalized estimators under suitable conditions, which allow general structures of underlying parameter. Moreover, the proposed estimator is novelly instantiated for SIMs with monotone transfer function, and the obtained estimator can better leverage the monotonicity. Experimental results are provided to support our theoretical analyses.", "bibtex": "@InProceedings{pmlr-v70-chen17a,\n title = \t {Robust Structured Estimation with Single-Index Models},\n author = {Sheng Chen and Arindam Banerjee},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {712--721},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/chen17a/chen17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/chen17a.html},\n abstract = \t {In this paper, we investigate general single-index models (SIMs) in high dimensions. Based on U-statistics, we propose two types of robust estimators for the recovery of model parameters, which can be viewed as generalizations of several existing algorithms for one-bit compressed sensing (1-bit CS). With minimal assumption on noise, the statistical guarantees are established for the generalized estimators under suitable conditions, which allow general structures of underlying parameter. Moreover, the proposed estimator is novelly instantiated for SIMs with monotone transfer function, and the obtained estimator can better leverage the monotonicity. 
Experimental results are provided to support our theoretical analyses.}\n}", "pdf": "http://proceedings.mlr.press/v70/chen17a/chen17a.pdf", "supp": "", "pdf_size": 173862, "gs_citation": 10, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11011988808408226843&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science & Engineering, University of Minnesota-Twin Cities, Minnesota, USA; Department of Computer Science & Engineering, University of Minnesota-Twin Cities, Minnesota, USA", "aff_domain": "cs.umn.edu;cs.umn.edu", "email": "cs.umn.edu;cs.umn.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/chen17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Minnesota-Twin Cities", "aff_unique_dep": "Department of Computer Science & Engineering", "aff_unique_url": "https://www.umn.edu", "aff_unique_abbr": "UMN", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Twin Cities", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Robust Submodular Maximization: A Non-Uniform Partitioning Approach", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/674", "id": "674", "author_site": "Ilija Bogunovic, Slobodan Mitrovic, Jonathan Scarlett, Volkan Cevher", "author": "Ilija Bogunovic; Slobodan Mitrovi\u0107; Jonathan Scarlett; Volkan Cevher", "abstract": "We study the problem of maximizing a monotone submodular function subject to a cardinality constraint $k$, with the added twist that a number of items $\\tau$ from the returned set may be removed. We focus on the worst-case setting considered by Orlin et al.\\ (2016), in which a constant-factor approximation guarantee was given for $\\tau = o(\\sqrt{k})$. In this paper, we solve a key open problem raised therein, presenting a new Partitioned Robust (PRo) submodular maximization algorithm that achieves the same guarantee for more general $\\tau = o(k)$. Our algorithm constructs partitions consisting of buckets with exponentially increasing sizes, and applies standard submodular optimization subroutines on the buckets in order to construct the robust solution. We numerically demonstrate the performance of PRo in data summarization and influence maximization, demonstrating gains over both the greedy algorithm and the algorithm of Orlin et al.\\ (2016).", "bibtex": "@InProceedings{pmlr-v70-bogunovic17a,\n title = \t {Robust Submodular Maximization: A Non-Uniform Partitioning Approach},\n author = {Ilija Bogunovic and Slobodan Mitrovi{\\'c} and Jonathan Scarlett and Volkan Cevher},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {508--516},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/bogunovic17a/bogunovic17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/bogunovic17a.html},\n abstract = \t {We study the problem of maximizing a monotone submodular function subject to a cardinality constraint $k$, with the added twist that a number of items $\\tau$ from the returned set may be removed. We focus on the worst-case setting considered by Orlin et al.\\ (2016), in which a constant-factor approximation guarantee was given for $\\tau = o(\\sqrt{k})$. 
In this paper, we solve a key open problem raised therein, presenting a new Partitioned Robust (PRo) submodular maximization algorithm that achieves the same guarantee for more general $\\tau = o(k)$. Our algorithm constructs partitions consisting of buckets with exponentially increasing sizes, and applies standard submodular optimization subroutines on the buckets in order to construct the robust solution. We numerically demonstrate the performance of PRo in data summarization and influence maximization, demonstrating gains over both the greedy algorithm and the algorithm of Orlin et al.\\ (2016).}\n}", "pdf": "http://proceedings.mlr.press/v70/bogunovic17a/bogunovic17a.pdf", "supp": "", "pdf_size": 1802905, "gs_citation": 77, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3655266594068619633&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "LIONS, EPFL, Switzerland; LTHC, EPFL, Switzerland; LIONS, EPFL, Switzerland; LIONS, EPFL, Switzerland", "aff_domain": "epfl.ch;epfl.ch;epfl.ch;epfl.ch", "email": "epfl.ch;epfl.ch;epfl.ch;epfl.ch", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/bogunovic17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "EPFL", "aff_unique_dep": "LIONS", "aff_unique_url": "https://www.epfl.ch", "aff_unique_abbr": "EPFL", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Switzerland" }, { "title": "RobustFill: Neural Program Learning under Noisy I/O", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/661", "id": "661", "author_site": "Jacob Devlin, Jonathan Uesato, Surya Bhupatiraju, Rishabh Singh, Abdelrahman Mohammad, Pushmeet Kohli", "author": "Jacob Devlin; Jonathan Uesato; Surya Bhupatiraju; Rishabh Singh; Abdel-rahman Mohamed; Pushmeet Kohli", "abstract": "The problem of automatically generating a computer program from some specification has been studied since the early days of AI. Recently, two competing approaches for `automatic program learning\u2019 have received significant attention: (1) `neural program synthesis\u2019, where a neural network is conditioned on input/output (I/O) examples and learns to generate a program, and (2) `neural program induction\u2019, where a neural network generates new outputs directly using a latent program representation. Here, for the first time, we directly compare both approaches on a large-scale, real-world learning task and we additionally contrast to rule-based program synthesis, which uses hand-crafted semantics to guide the program generation. Our neural models use a modified attention RNN to allow encoding of variable-sized sets of I/O pairs, which achieve 92\\% accuracy on a real-world test set, compared to the 34\\% accuracy of the previous best neural synthesis approach. The synthesis model also outperforms a comparable induction model on this task, but we more importantly demonstrate that the strength of each approach is highly dependent on the evaluation metric and end-user application. 
Finally, we show that we can train our neural models to remain very robust to the type of noise expected in real-world data (e.g., typos), while a highly-engineered rule-based system fails entirely.", "bibtex": "@InProceedings{pmlr-v70-devlin17a,\n title = \t {{R}obust{F}ill: Neural Program Learning under Noisy {I}/{O}},\n author = {Jacob Devlin and Jonathan Uesato and Surya Bhupatiraju and Rishabh Singh and Abdel-rahman Mohamed and Pushmeet Kohli},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {990--998},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/devlin17a/devlin17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/devlin17a.html},\n abstract = \t {The problem of automatically generating a computer program from some specification has been studied since the early days of AI. Recently, two competing approaches for `automatic program learning\u2019 have received significant attention: (1) `neural program synthesis\u2019, where a neural network is conditioned on input/output (I/O) examples and learns to generate a program, and (2) `neural program induction\u2019, where a neural network generates new outputs directly using a latent program representation. Here, for the first time, we directly compare both approaches on a large-scale, real-world learning task and we additionally contrast to rule-based program synthesis, which uses hand-crafted semantics to guide the program generation. Our neural models use a modified attention RNN to allow encoding of variable-sized sets of I/O pairs, which achieve 92\\% accuracy on a real-world test set, compared to the 34\\% accuracy of the previous best neural synthesis approach. The synthesis model also outperforms a comparable induction model on this task, but we more importantly demonstrate that the strength of each approach is highly dependent on the evaluation metric and end-user application. 
Finally, we show that we can train our neural models to remain very robust to the type of noise expected in real-world data (e.g., typos), while a highly-engineered rule-based system fails entirely.}\n}", "pdf": "http://proceedings.mlr.press/v70/devlin17a/devlin17a.pdf", "supp": "", "pdf_size": 736287, "gs_citation": 483, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3655548984351937652&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Microsoft Research, Redmond, Washington, USA+MIT, Cambridge, Massachusetts, USA; MIT, Cambridge, Massachusetts, USA; MIT, Cambridge, Massachusetts, USA; Microsoft Research, Redmond, Washington, USA; Microsoft Research, Redmond, Washington, USA; Microsoft Research, Redmond, Washington, USA", "aff_domain": "microsoft.com; ; ; ; ;", "email": "microsoft.com; ; ; ; ;", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/devlin17a.html", "aff_unique_index": "0+1;1;1;0;0;0", "aff_unique_norm": "Microsoft;Massachusetts Institute of Technology", "aff_unique_dep": "Microsoft Research;", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://web.mit.edu", "aff_unique_abbr": "MSR;MIT", "aff_campus_unique_index": "0+1;1;1;0;0;0", "aff_campus_unique": "Redmond;Cambridge", "aff_country_unique_index": "0+0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Rule-Enhanced Penalized Regression by Column Generation using Rectangular Maximum Agreement", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/604", "id": "604", "author_site": "Jonathan Eckstein, Noam Goldberg, Ai Kagawa", "author": "Jonathan Eckstein; Noam Goldberg; Ai Kagawa", "abstract": "We describe a learning procedure enhancing L1-penalized regression by adding dynamically generated rules describing multidimensional \u201cbox\u201d sets. Our rule-adding procedure is based on the classical column generation method for high-dimensional linear programming. The pricing problem for our column generation procedure reduces to the NP-hard rectangular maximum agreement (RMA) problem of finding a box that best discriminates between two weighted datasets. We solve this problem exactly using a parallel branch-and-bound procedure. The resulting rule-enhanced regression procedure is computation-intensive, but has promising prediction performance.", "bibtex": "@InProceedings{pmlr-v70-eckstein17a,\n title = \t {Rule-Enhanced Penalized Regression by Column Generation using Rectangular Maximum Agreement},\n author = {Jonathan Eckstein and Noam Goldberg and Ai Kagawa},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1059--1067},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/eckstein17a/eckstein17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/eckstein17a.html},\n abstract = \t {We describe a learning procedure enhancing L1-penalized regression by adding dynamically generated rules describing multidimensional \u201cbox\u201d sets. Our rule-adding procedure is based on the classical column generation method for high-dimensional linear programming. The pricing problem for our column generation procedure reduces to the NP-hard rectangular maximum agreement (RMA) problem of finding a box that best discriminates between two weighted datasets. 
We solve this problem exactly using a parallel branch-and-bound procedure. The resulting rule-enhanced regression procedure is computation-intensive, but has promising prediction performance.}\n}", "pdf": "http://proceedings.mlr.press/v70/eckstein17a/eckstein17a.pdf", "supp": "", "pdf_size": 1698976, "gs_citation": 9, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14102181445630491079&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Management Science and Information Systems, Rutgers University, Piscataway, NJ, USA; Department of Management, Bar-Ilan University, Ramat Gan, Israel; Doctoral Program in Operations Research, Rutgers University, Piscataway, NJ, USA", "aff_domain": "business.rutgers.edu; ; ", "email": "business.rutgers.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/eckstein17a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "Rutgers University;Bar-Ilan University", "aff_unique_dep": "Management Science and Information Systems;Department of Management", "aff_unique_url": "https://www.rutgers.edu;https://www.biu.ac.il", "aff_unique_abbr": "Rutgers;BIU", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Piscataway;Ramat Gan", "aff_country_unique_index": "0;1;0", "aff_country_unique": "United States;Israel" }, { "title": "SARAH: A Novel Method for Machine Learning Problems Using Stochastic Recursive Gradient", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/605", "id": "605", "author_site": "Lam Nguyen, Jie Liu, Katya Scheinberg, Martin Takac", "author": "Lam M. Nguyen; Jie Liu; Katya Scheinberg; Martin Tak\u00e1\u010d", "abstract": "In this paper, we propose a StochAstic Recursive grAdient algoritHm (SARAH), as well as its practical variant SARAH+, as a novel approach to the finite-sum minimization problems. Different from the vanilla SGD and other modern stochastic methods such as SVRG, S2GD, SAG and SAGA, SARAH admits a simple recursive framework for updating stochastic gradient estimates; when comparing to SAG/SAGA, SARAH does not require a storage of past gradients. The linear convergence rate of SARAH is proven under strong convexity assumption. We also prove a linear convergence rate (in the strongly convex case) for an inner loop of SARAH, the property that SVRG does not possess. Numerical experiments demonstrate the efficiency of our algorithm.", "bibtex": "@InProceedings{pmlr-v70-nguyen17b,\n title = \t {{SARAH}: A Novel Method for Machine Learning Problems Using Stochastic Recursive Gradient},\n author = {Lam M. Nguyen and Jie Liu and Katya Scheinberg and Martin Tak{\\'a}{\\v{c}}},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2613--2621},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/nguyen17b/nguyen17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/nguyen17b.html},\n abstract = \t {In this paper, we propose a StochAstic Recursive grAdient algoritHm (SARAH), as well as its practical variant SARAH+, as a novel approach to the finite-sum minimization problems. 
Different from the vanilla SGD and other modern stochastic methods such as SVRG, S2GD, SAG and SAGA, SARAH admits a simple recursive framework for updating stochastic gradient estimates; when comparing to SAG/SAGA, SARAH does not require a storage of past gradients. The linear convergence rate of SARAH is proven under strong convexity assumption. We also prove a linear convergence rate (in the strongly convex case) for an inner loop of SARAH, the property that SVRG does not possess. Numerical experiments demonstrate the efficiency of our algorithm.}\n}", "pdf": "http://proceedings.mlr.press/v70/nguyen17b/nguyen17b.pdf", "supp": "", "pdf_size": 570835, "gs_citation": 763, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15168434346098746144&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Department of Industrial and Systems Engineering, Lehigh University, USA; Department of Industrial and Systems Engineering, Lehigh University, USA; Department of Industrial and Systems Engineering, Lehigh University, USA + The University of Oxford, UK; Department of Industrial and Systems Engineering, Lehigh University, USA", "aff_domain": "gmail.com;gmail.com;lehigh.edu;gmail.com", "email": "gmail.com;gmail.com;lehigh.edu;gmail.com", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/nguyen17b.html", "aff_unique_index": "0;0;0+1;0", "aff_unique_norm": "Lehigh University;University of Oxford", "aff_unique_dep": "Department of Industrial and Systems Engineering;", "aff_unique_url": "https://www.lehigh.edu;https://www.ox.ac.uk", "aff_unique_abbr": "Lehigh;Oxford", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0+1;0", "aff_country_unique": "United States;United Kingdom" }, { "title": "SPLICE: Fully Tractable Hierarchical Extension of ICA with Pooling", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/691", "id": "691", "author_site": "Jun-ichiro Hirayama, Aapo Hyv\u00e4rinen, Motoaki Kawanabe", "author": "Jun-ichiro Hirayama; Aapo Hyv\u00e4rinen; Motoaki Kawanabe", "abstract": "We present a novel probabilistic framework for a hierarchical extension of independent component analysis (ICA), with a particular motivation in neuroscientific data analysis and modeling. The framework incorporates a general subspace pooling with linear ICA-like layers stacked recursively. Unlike related previous models, our generative model is fully tractable: both the likelihood and the posterior estimates of latent variables can readily be computed with analytically simple formulae. The model is particularly simple in the case of complex-valued data since the pooling can be reduced to taking the modulus of complex numbers. 
Experiments on electroencephalography (EEG) and natural images demonstrate the validity of the method.", "bibtex": "@InProceedings{pmlr-v70-hirayama17a,\n title = \t {{SPLICE}: Fully Tractable Hierarchical Extension of {ICA} with Pooling},\n author = {Jun-ichiro Hirayama and Aapo Hyv{\\\"a}rinen and Motoaki Kawanabe},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1491--1500},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/hirayama17a/hirayama17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/hirayama17a.html},\n abstract = \t {We present a novel probabilistic framework for a hierarchical extension of independent component analysis (ICA), with a particular motivation in neuroscientific data analysis and modeling. The framework incorporates a general subspace pooling with linear ICA-like layers stacked recursively. Unlike related previous models, our generative model is fully tractable: both the likelihood and the posterior estimates of latent variables can readily be computed with analytically simple formulae. The model is particularly simple in the case of complex-valued data since the pooling can be reduced to taking the modulus of complex numbers. Experiments on electroencephalography (EEG) and natural images demonstrate the validity of the method.}\n}", "pdf": "http://proceedings.mlr.press/v70/hirayama17a/hirayama17a.pdf", "supp": "", "pdf_size": 569104, "gs_citation": 8, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7452617085325839295&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "RIKEN Center for Advanced Intelligence Project (AIP), Tokyo, Japan + Advanced Telecommunications Research Institute International (ATR), Kyoto, Japan; Department of Computer Science and HIIT, University of Helsinki, Finland + Gatsby Computational Neuroscience Unit, University College London, UK; Advanced Telecommunications Research Institute International (ATR), Kyoto, Japan", "aff_domain": "riken.jp; ; ", "email": "riken.jp; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/hirayama17a.html", "aff_unique_index": "0+1;2+3;1", "aff_unique_norm": "RIKEN Center for Advanced Intelligence Project;Advanced Telecommunications Research Institute International;University of Helsinki;University College London", "aff_unique_dep": "Advanced Intelligence Project;;Department of Computer Science and HIIT;Gatsby Computational Neuroscience Unit", "aff_unique_url": "https://aipcenter.riken.jp/en/;https://www.atr.jp;https://www.helsinki.fi;https://www.ucl.ac.uk", "aff_unique_abbr": "RIKEN AIP;ATR;UH;UCL", "aff_campus_unique_index": "0+1;3;1", "aff_campus_unique": "Tokyo;Kyoto;;London", "aff_country_unique_index": "0+0;1+2;0", "aff_country_unique": "Japan;Finland;United Kingdom" }, { "title": "Safety-Aware Algorithms for Adversarial Contextual Bandit", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/628", "id": "628", "author_site": "Wen Sun, Debadeepta Dey, Ashish Kapoor", "author": "Wen Sun; Debadeepta Dey; Ashish Kapoor", "abstract": "In this work we study the safe sequential decision making problem under the setting of adversarial contextual bandits with sequential risk constraints. 
At each round, nature prepares a context, a cost for each arm, and additionally a risk for each arm. The learner leverages the context to pull an arm and receives the corresponding cost and risk associated with the pulled arm. In addition to minimizing the cumulative cost, for safety purposes, the learner needs to make safe decisions such that the average of the cumulative risk from all pulled arms should not be larger than a pre-defined threshold. To address this problem, we first study online convex programming in the full information setting where in each round the learner receives an adversarial convex loss and a convex constraint. We develop a meta algorithm leveraging online mirror descent for the full information setting and then extend it to contextual bandit with sequential risk constraints setting using expert advice. Our algorithms can achieve near-optimal regret in terms of minimizing the total cost, while successfully maintaining a sub-linear growth of accumulative risk constraint violation. We support our theoretical results by demonstrating our algorithm on a simple simulated robotics reactive control task.", "bibtex": "@InProceedings{pmlr-v70-sun17a,\n title = \t {Safety-Aware Algorithms for Adversarial Contextual Bandit},\n author = {Wen Sun and Debadeepta Dey and Ashish Kapoor},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3280--3288},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/sun17a/sun17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/sun17a.html},\n abstract = \t {In this work we study the safe sequential decision making problem under the setting of adversarial contextual bandits with sequential risk constraints. At each round, nature prepares a context, a cost for each arm, and additionally a risk for each arm. The learner leverages the context to pull an arm and receives the corresponding cost and risk associated with the pulled arm. In addition to minimizing the cumulative cost, for safety purposes, the learner needs to make safe decisions such that the average of the cumulative risk from all pulled arms should not be larger than a pre-defined threshold. To address this problem, we first study online convex programming in the full information setting where in each round the learner receives an adversarial convex loss and a convex constraint. We develop a meta algorithm leveraging online mirror descent for the full information setting and then extend it to contextual bandit with sequential risk constraints setting using expert advice. Our algorithms can achieve near-optimal regret in terms of minimizing the total cost, while successfully maintaining a sub-linear growth of accumulative risk constraint violation. 
We support our theoretical results by demonstrating our algorithm on a simple simulated robotics reactive control task.}\n}", "pdf": "http://proceedings.mlr.press/v70/sun17a/sun17a.pdf", "supp": "", "pdf_size": 808677, "gs_citation": 85, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1039982554285127146&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Robotics Institute, Carnegie Mellon University, USA; Microsoft Research, Redmond, USA; Microsoft Research, Redmond, USA", "aff_domain": "cs.cmu.edu; ; ", "email": "cs.cmu.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/sun17a.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "Carnegie Mellon University;Microsoft", "aff_unique_dep": "Robotics Institute;Microsoft Research", "aff_unique_url": "https://www.cmu.edu;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "CMU;MSR", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Redmond", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Scalable Bayesian Rule Lists", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/735", "id": "735", "author_site": "Hongyu Yang, Cynthia Rudin, Margo Seltzer", "author": "Hongyu Yang; Cynthia Rudin; Margo Seltzer", "abstract": "We present an algorithm for building probabilistic rule lists that is two orders of magnitude faster than previous work. Rule list algorithms are competitors for decision tree algorithms. They are associative classifiers, in that they are built from pre-mined association rules. They have a logical structure that is a sequence of IF-THEN rules, identical to a decision list or one-sided decision tree. Instead of using greedy splitting and pruning like decision tree algorithms, we aim to fully optimize over rule lists, striking a practical balance between accuracy, interpretability, and computational speed. The algorithm presented here uses a mixture of theoretical bounds (tight enough to have practical implications as a screening or bounding procedure), computational reuse, and highly tuned language libraries to achieve computational efficiency. Currently, for many practical problems, this method achieves better accuracy and sparsity than decision trees. In many cases, the computational time is practical and often less than that of decision trees.", "bibtex": "@InProceedings{pmlr-v70-yang17h,\n title = \t {Scalable {B}ayesian Rule Lists},\n author = {Hongyu Yang and Cynthia Rudin and Margo Seltzer},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3921--3930},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/yang17h/yang17h.pdf},\n url = \t {https://proceedings.mlr.press/v70/yang17h.html},\n abstract = \t {We present an algorithm for building probabilistic rule lists that is two orders of magnitude faster than previous work. Rule list algorithms are competitors for decision tree algorithms. They are associative classifiers, in that they are built from pre-mined association rules. They have a logical structure that is a sequence of IF-THEN rules, identical to a decision list or one-sided decision tree. 
Instead of using greedy splitting and pruning like decision tree algorithms, we aim to fully optimize over rule lists, striking a practical balance between accuracy, interpretability, and computational speed. The algorithm presented here uses a mixture of theoretical bounds (tight enough to have practical implications as a screening or bounding procedure), computational reuse, and highly tuned language libraries to achieve computational efficiency. Currently, for many practical problems, this method achieves better accuracy and sparsity than decision trees. In many cases, the computational time is practical and often less than that of decision trees.}\n}", "pdf": "http://proceedings.mlr.press/v70/yang17h/yang17h.pdf", "supp": "", "pdf_size": 820583, "gs_citation": 265, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12139062942580489052&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Massachusetts Institute of Technology, Cambridge, Massachusetts, USA; Duke University, Durham, North Carolina, USA; Harvard University, Cambridge, Massachusetts, USA", "aff_domain": "mit.edu; ; ", "email": "mit.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/yang17h.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Massachusetts Institute of Technology;Duke University;Harvard University", "aff_unique_dep": ";;", "aff_unique_url": "https://www.mit.edu;https://www.duke.edu;https://www.harvard.edu", "aff_unique_abbr": "MIT;Duke;Harvard", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Cambridge;Durham", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Scalable Generative Models for Multi-label Learning with Missing Labels", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/708", "id": "708", "author_site": "Vikas Jain, Nirbhay Modhe, Piyush Rai", "author": "Vikas Jain; Nirbhay Modhe; Piyush Rai", "abstract": "We present a scalable, generative framework for multi-label learning with missing labels. Our framework consists of a latent factor model for the binary label matrix, which is coupled with an exposure model to account for label missingness (i.e., whether a zero in the label matrix is indeed a zero or denotes a missing observation). The underlying latent factor model also assumes that the low-dimensional embeddings of each label vector are directly conditioned on the respective feature vector of that example. Our generative framework admits a simple inference procedure, such that the parameter estimation reduces to a sequence of simple weighted least-square regression problems, each of which can be solved easily, efficiently, and in parallel. Moreover, inference can also be performed in an online fashion using mini-batches of training examples, which makes our framework scalable for large data sets, even when using moderate computational resources. 
We report both quantitative and qualitative results for our framework on several benchmark data sets, comparing it with a number of state-of-the-art methods.", "bibtex": "@InProceedings{pmlr-v70-jain17a,\n title = \t {Scalable Generative Models for Multi-label Learning with Missing Labels},\n author = {Vikas Jain and Nirbhay Modhe and Piyush Rai},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1636--1644},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/jain17a/jain17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/jain17a.html},\n abstract = \t {We present a scalable, generative framework for multi-label learning with missing labels. Our framework consists of a latent factor model for the binary label matrix, which is coupled with an exposure model to account for label missingness (i.e., whether a zero in the label matrix is indeed a zero or denotes a missing observation). The underlying latent factor model also assumes that the low-dimensional embeddings of each label vector are directly conditioned on the respective feature vector of that example. Our generative framework admits a simple inference procedure, such that the parameter estimation reduces to a sequence of simple weighted least-square regression problems, each of which can be solved easily, efficiently, and in parallel. Moreover, inference can also be performed in an online fashion using mini-batches of training examples, which makes our framework scalable for large data sets, even when using moderate computational resources. We report both quantitative and qualitative results for our framework on several benchmark data sets, comparing it with a number of state-of-the-art methods.}\n}", "pdf": "http://proceedings.mlr.press/v70/jain17a/jain17a.pdf", "supp": "", "pdf_size": 459634, "gs_citation": 41, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17776249770075027961&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Department of Computer Science and Engineering, IIT Kanpur, Kanpur 208016, UP, India; Department of Computer Science and Engineering, IIT Kanpur, Kanpur 208016, UP, India; Department of Computer Science and Engineering, IIT Kanpur, Kanpur 208016, UP, India", "aff_domain": "iitk.ac.in;iitk.ac.in;cse.iitk.ac.in", "email": "iitk.ac.in;iitk.ac.in;cse.iitk.ac.in", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/jain17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "IIT Kanpur", "aff_unique_dep": "Department of Computer Science and Engineering", "aff_unique_url": "https://www.iitk.ac.in", "aff_unique_abbr": "IITK", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Kanpur", "aff_country_unique_index": "0;0;0", "aff_country_unique": "India" }, { "title": "Scalable Multi-Class Gaussian Process Classification using Expectation Propagation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/506", "id": "506", "author_site": "Carlos Villacampa-Calvo, Daniel Hernandez-Lobato", "author": "Carlos Villacampa-Calvo; Daniel Hern\u00e1ndez-Lobato", "abstract": "This paper describes an expectation propagation (EP) method for multi-class classification with Gaussian processes that scales well to very large datasets. 
In such a method the estimate of the log-marginal-likelihood involves a sum across the data instances. This enables efficient training using stochastic gradients and mini-batches. When this type of training is used, the computational cost does not depend on the number of data instances N. Furthermore, extra assumptions in the approximate inference process make the memory cost independent of N. The consequence is that the proposed EP method can be used on datasets with millions of instances. We compare empirically this method with alternative approaches that approximate the required computations using variational inference. The results show that it performs similar or even better than these techniques, which sometimes give significantly worse predictive distributions in terms of the test log-likelihood. Besides this, the training process of the proposed approach also seems to converge in a smaller number of iterations.", "bibtex": "@InProceedings{pmlr-v70-villacampa-calvo17a,\n title = \t {Scalable Multi-Class {G}aussian Process Classification using Expectation Propagation},\n author = {Carlos Villacampa-Calvo and Daniel Hern{\\'a}ndez-Lobato},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3550--3559},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/villacampa-calvo17a/villacampa-calvo17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/villacampa-calvo17a.html},\n abstract = \t {This paper describes an expectation propagation (EP) method for multi-class classification with Gaussian processes that scales well to very large datasets. In such a method the estimate of the log-marginal-likelihood involves a sum across the data instances. This enables efficient training using stochastic gradients and mini-batches. When this type of training is used, the computational cost does not depend on the number of data instances N. Furthermore, extra assumptions in the approximate inference process make the memory cost independent of N. The consequence is that the proposed EP method can be used on datasets with millions of instances. We compare empirically this method with alternative approaches that approximate the required computations using variational inference. The results show that it performs similar or even better than these techniques, which sometimes give significantly worse predictive distributions in terms of the test log-likelihood. 
Besides this, the training process of the proposed approach also seems to converge in a smaller number of iterations.}\n}", "pdf": "http://proceedings.mlr.press/v70/villacampa-calvo17a/villacampa-calvo17a.pdf", "supp": "", "pdf_size": 4364332, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10199590340516718971&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Universidad Aut\u00f3noma de Madrid; Universidad Aut\u00f3noma de Madrid", "aff_domain": "uam.es; ", "email": "uam.es; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/villacampa-calvo17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Universidad Aut\u00f3noma de Madrid", "aff_unique_dep": "", "aff_unique_url": "https://www.uam.es", "aff_unique_abbr": "UAM", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Spain" }, { "title": "Scaling Up Sparse Support Vector Machines by Simultaneous Feature and Sample Reduction", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/569", "id": "569", "author_site": "Weizhong Zhang, Bin Hong, Wei Liu, Jieping Ye, Deng Cai, Xiaofei He, Jie Wang", "author": "Weizhong Zhang; Bin Hong; Wei Liu; Jieping Ye; Deng Cai; Xiaofei He; Jie Wang", "abstract": "Sparse support vector machine (SVM) is a popular classification technique that can simultaneously learn a small set of the most interpretable features and identify the support vectors. It has achieved great successes in many real-world applications. However, for large-scale problems involving a huge number of samples and extremely high-dimensional features, solving sparse SVMs remains challenging. By noting that sparse SVMs induce sparsities in both feature and sample spaces, we propose a novel approach, which is based on accurate estimations of the primal and dual optima of sparse SVMs, to simultaneously identify the features and samples that are guaranteed to be irrelevant to the outputs. Thus, we can remove the identified inactive samples and features from the training phase, leading to substantial savings in both the memory usage and computational cost without sacrificing accuracy. To the best of our knowledge, the proposed method is the", "bibtex": "@InProceedings{pmlr-v70-zhang17c,\n title = \t {Scaling Up Sparse Support Vector Machines by Simultaneous Feature and Sample Reduction},\n author = {Weizhong Zhang and Bin Hong and Wei Liu and Jieping Ye and Deng Cai and Xiaofei He and Jie Wang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {4016--4025},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zhang17c/zhang17c.pdf},\n url = \t {https://proceedings.mlr.press/v70/zhang17c.html},\n abstract = \t {Sparse support vector machine (SVM) is a popular classification technique that can simultaneously learn a small set of the most interpretable features and identify the support vectors. It has achieved great successes in many real-world applications. However, for large-scale problems involving a huge number of samples and extremely high-dimensional features, solving sparse SVMs remains challenging. 
By noting that sparse SVMs induce sparsities in both feature and sample spaces, we propose a novel approach, which is based on accurate estimations of the primal and dual optima of sparse SVMs, to simultaneously identify the features and samples that are guaranteed to be irrelevant to the outputs. Thus, we can remove the identified inactive samples and features from the training phase, leading to substantial savings in both the memory usage and computational cost without sacrificing accuracy. To the best of our knowledge, the proposed method is the", "pdf": "http://proceedings.mlr.press/v70/zhang17c/zhang17c.pdf", "supp": "", "pdf_size": 572539, "gs_citation": 46, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7111193362684407403&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "State Key Lab of CAD &CG, Zhejiang University, China+Tencent AI Lab, Shenzhen, China; State Key Lab of CAD &CG, Zhejiang University, China+Tencent AI Lab, Shenzhen, China; Tencent AI Lab, Shenzhen, China; University of Michigan, USA; State Key Lab of CAD &CG, Zhejiang University, China; State Key Lab of CAD &CG, Zhejiang University, China; University of Michigan, USA", "aff_domain": "gmail.com; ; ; ; ; ;gmail.com", "email": "gmail.com; ; ; ; ; ;gmail.com", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v70/zhang17c.html", "aff_unique_index": "0+1;0+1;1;2;0;0;2", "aff_unique_norm": "Zhejiang University;Tencent;University of Michigan", "aff_unique_dep": "State Key Lab of CAD &CG;AI Lab;", "aff_unique_url": "http://www.zju.edu.cn;https://ai.tencent.com;https://www.umich.edu", "aff_unique_abbr": "ZJU;Tencent AI Lab;UM", "aff_campus_unique_index": "1;1;1", "aff_campus_unique": ";Shenzhen", "aff_country_unique_index": "0+0;0+0;0;1;0;0;1", "aff_country_unique": "China;United States" }, { "title": "Schema Networks: Zero-shot Transfer with a Generative Causal Model of Intuitive Physics", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/652", "id": "652", "author_site": "Ken Kansky, Thomas Silver, David A M\u00e9ly, Mohamed Eldawy, Miguel Lazaro-Gredilla, Xinghua Lou, Nimrod Dorfman, Szymon Sidor, Scott Phoenix, Dileep George", "author": "Ken Kansky; Tom Silver; David A. M\u00e9ly; Mohamed Eldawy; Miguel L\u00e1zaro-Gredilla; Xinghua Lou; Nimrod Dorfman; Szymon Sidor; Scott Phoenix; Dileep George", "abstract": "The recent adaptation of deep neural network-based methods to reinforcement learning and planning domains has yielded remarkable progress on individual tasks. Nonetheless, progress on task-to-task transfer remains limited. In pursuit of efficient and robust generalization, we introduce the Schema Network, an object-oriented generative physics simulator capable of disentangling multiple causes of events and reasoning backward through causes to achieve goals. The richly structured architecture of the Schema Network can learn the dynamics of an environment directly from data. We compare Schema Networks with Asynchronous Advantage Actor-Critic and Progressive Networks on a suite of Breakout variations, reporting results on training efficiency and zero-shot generalization, consistently demonstrating faster, more robust learning and better transfer. 
We argue that generalizing from limited data and learning causal relationships are essential abilities on the path toward generally intelligent systems.", "bibtex": "@InProceedings{pmlr-v70-kansky17a,\n title = \t {Schema Networks: Zero-shot Transfer with a Generative Causal Model of Intuitive Physics},\n author = {Ken Kansky and Tom Silver and David A. M{\\'e}ly and Mohamed Eldawy and Miguel L{\\'a}zaro-Gredilla and Xinghua Lou and Nimrod Dorfman and Szymon Sidor and Scott Phoenix and Dileep George},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1809--1818},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/kansky17a/kansky17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/kansky17a.html},\n abstract = \t {The recent adaptation of deep neural network-based methods to reinforcement learning and planning domains has yielded remarkable progress on individual tasks. Nonetheless, progress on task-to-task transfer remains limited. In pursuit of efficient and robust generalization, we introduce the Schema Network, an object-oriented generative physics simulator capable of disentangling multiple causes of events and reasoning backward through causes to achieve goals. The richly structured architecture of the Schema Network can learn the dynamics of an environment directly from data. We compare Schema Networks with Asynchronous Advantage Actor-Critic and Progressive Networks on a suite of Breakout variations, reporting results on training efficiency and zero-shot generalization, consistently demonstrating faster, more robust learning and better transfer. 
We argue that generalizing from limited data and learning causal relationships are essential abilities on the path toward generally intelligent systems.}\n}", "pdf": "http://proceedings.mlr.press/v70/kansky17a/kansky17a.pdf", "supp": "", "pdf_size": 873508, "gs_citation": 300, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10144461492904583282&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Vicarious AI, California, USA; Vicarious AI, California, USA; Vicarious AI, California, USA; Vicarious AI, California, USA; Vicarious AI, California, USA; Vicarious AI, California, USA; Vicarious AI, California, USA; Vicarious AI, California, USA; Vicarious AI, California, USA; Vicarious AI, California, USA", "aff_domain": "vicarious.com;vicarious.com; ; ; ; ; ; ; ; ", "email": "vicarious.com;vicarious.com; ; ; ; ; ; ; ; ", "github": "", "project": "", "author_num": 10, "oa": "https://proceedings.mlr.press/v70/kansky17a.html", "aff_unique_index": "0;0;0;0;0;0;0;0;0;0", "aff_unique_norm": "Vicarious AI", "aff_unique_dep": "", "aff_unique_url": "", "aff_unique_abbr": "", "aff_campus_unique_index": "0;0;0;0;0;0;0;0;0;0", "aff_campus_unique": "California", "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Second-Order Kernel Online Convex Optimization with Adaptive Sketching", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/812", "id": "812", "author_site": "Daniele Calandriello, Alessandro Lazaric, Michal Valko", "author": "Daniele Calandriello; Alessandro Lazaric; Michal Valko", "abstract": "Kernel online convex optimization (KOCO) is a framework combining the expressiveness of non-parametric kernel models with the regret guarantees of online learning. First-order KOCO methods such as functional gradient descent require only $O(t)$ time and space per iteration, and, when the only information on the losses is their convexity, achieve a minimax optimal $O(\\sqrt{T})$ regret. Nonetheless, many common losses in kernel problems, such as squared loss, logistic loss, and squared hinge loss posses stronger curvature that can be exploited. In this case, second-order KOCO methods achieve $O(\\log(\\mathrm{Det}(K)))$ regret, which we show scales as $O(deff \\log T)$, where $deff$ is the effective dimension of the problem and is usually much smaller than $O(\\sqrt{T})$. The main drawback of second-order methods is their much higher $O(t^2)$ space and time complexity. In this paper, we introduce kernel online Newton step (KONS), a new second-order KOCO method that also achieves $O(deff\\log T)$ regret. 
To address the computational complexity of second-order methods, we introduce a new matrix sketching algorithm for the kernel matrix~$K$, and show that for a chosen parameter $\\gamma \\leq 1$ our Sketched-KONS reduces the space and time complexity by a factor of $\\gamma^2$ to $O(t^2\\gamma^2)$ space and time per iteration, while incurring only $1/\\gamma$ times more regret.", "bibtex": "@InProceedings{pmlr-v70-calandriello17a,\n title = \t {Second-Order Kernel Online Convex Optimization with Adaptive Sketching},\n author = {Daniele Calandriello and Alessandro Lazaric and Michal Valko},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {645--653},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/calandriello17a/calandriello17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/calandriello17a.html},\n abstract = \t {Kernel online convex optimization (KOCO) is a framework combining the expressiveness of non-parametric kernel models with the regret guarantees of online learning. First-order KOCO methods such as functional gradient descent require only $O(t)$ time and space per iteration, and, when the only information on the losses is their convexity, achieve a minimax optimal $O(\\sqrt{T})$ regret. Nonetheless, many common losses in kernel problems, such as squared loss, logistic loss, and squared hinge loss posses stronger curvature that can be exploited. In this case, second-order KOCO methods achieve $O(\\log(\\mathrm{Det}(K)))$ regret, which we show scales as $O(deff \\log T)$, where $deff$ is the effective dimension of the problem and is usually much smaller than $O(\\sqrt{T})$. The main drawback of second-order methods is their much higher $O(t^2)$ space and time complexity. In this paper, we introduce kernel online Newton step (KONS), a new second-order KOCO method that also achieves $O(deff\\log T)$ regret. 
To address the computational complexity of second-order methods, we introduce a new matrix sketching algorithm for the kernel matrix~$K$, and show that for a chosen parameter $\\gamma \\leq 1$ our Sketched-KONS reduces the space and time complexity by a factor of $\\gamma^2$ to $O(t^2\\gamma^2)$ space and time per iteration, while incurring only $1/\\gamma$ times more regret.}\n}", "pdf": "http://proceedings.mlr.press/v70/calandriello17a/calandriello17a.pdf", "supp": "", "pdf_size": 339693, "gs_citation": 48, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7115556630344799127&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 16, "aff": "SequeL team, INRIA Lille - Nord Europe; SequeL team, INRIA Lille - Nord Europe; SequeL team, INRIA Lille - Nord Europe", "aff_domain": "inria.fr; ; ", "email": "inria.fr; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/calandriello17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "INRIA Lille - Nord Europe", "aff_unique_dep": "SequeL team", "aff_unique_url": "https://www.inria.fr/en/centre/lille-nord-europe", "aff_unique_abbr": "INRIA", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Lille", "aff_country_unique_index": "0;0;0", "aff_country_unique": "France" }, { "title": "Selective Inference for Sparse High-Order Interaction Models", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/801", "id": "801", "author_site": "Shinya Suzumura, Kazuya Nakagawa, Yuta Umezu, Koji Tsuda, Ichiro Takeuchi", "author": "Shinya Suzumura; Kazuya Nakagawa; Yuta Umezu; Koji Tsuda; Ichiro Takeuchi", "abstract": "Finding statistically significant high-order interactions in predictive modeling is important but challenging task because the possible number of high-order interactions is extremely large (e.g., $> 10^{17}$). In this paper we study feature selection and statistical inference for sparse high-order interaction models. Our main contribution is to extend recently developed selective inference framework for linear models to high-order interaction models by developing a novel algorithm for efficiently characterizing the selection event for the selective inference of high-order interactions. We demonstrate the effectiveness of the proposed algorithm by applying it to an HIV drug response prediction problem.", "bibtex": "@InProceedings{pmlr-v70-suzumura17a,\n title = \t {Selective Inference for Sparse High-Order Interaction Models},\n author = {Shinya Suzumura and Kazuya Nakagawa and Yuta Umezu and Koji Tsuda and Ichiro Takeuchi},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3338--3347},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/suzumura17a/suzumura17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/suzumura17a.html},\n abstract = \t {Finding statistically significant high-order interactions in predictive modeling is important but challenging task because the possible number of high-order interactions is extremely large (e.g., $> 10^{17}$). In this paper we study feature selection and statistical inference for sparse high-order interaction models. 
Our main contribution is to extend recently developed selective inference framework for linear models to high-order interaction models by developing a novel algorithm for efficiently characterizing the selection event for the selective inference of high-order interactions. We demonstrate the effectiveness of the proposed algorithm by applying it to an HIV drug response prediction problem.}\n}", "pdf": "http://proceedings.mlr.press/v70/suzumura17a/suzumura17a.pdf", "supp": "", "pdf_size": 826301, "gs_citation": 54, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11623171192507105448&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 3, "aff": "Nagoya Institute of Technology, Nagoya, Japan; Nagoya Institute of Technology, Nagoya, Japan; Nagoya Institute of Technology, Nagoya, Japan; University of Tokyo, Tokyo, Japan + RIKEN, Tokyo, Japan; Nagoya Institute of Technology, Nagoya, Japan + RIKEN, Tokyo, Japan", "aff_domain": "nitech.ac.jp;nitech.ac.jp;nitech.ac.jp;utoronto.ca;nitech.ac.jp", "email": "nitech.ac.jp;nitech.ac.jp;nitech.ac.jp;utoronto.ca;nitech.ac.jp", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/suzumura17a.html", "aff_unique_index": "0;0;0;1+2;0+2", "aff_unique_norm": "Nagoya Institute of Technology;University of Tokyo;RIKEN", "aff_unique_dep": ";;", "aff_unique_url": "https://www.nitech.ac.jp;https://www.u-tokyo.ac.jp;https://www.riken.jp", "aff_unique_abbr": "NIT;UTokyo;RIKEN", "aff_campus_unique_index": "0;0;0;1+1;0+1", "aff_campus_unique": "Nagoya;Tokyo", "aff_country_unique_index": "0;0;0;0+0;0+0", "aff_country_unique": "Japan" }, { "title": "Self-Paced Co-training", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/518", "id": "518", "author_site": "Fan Ma, Deyu Meng, Qi Xie, Zina Li, Xuanyi Dong", "author": "Fan Ma; Deyu Meng; Qi Xie; Zina Li; Xuanyi Dong", "abstract": "Co-training is a well-known semi-supervised learning approach which trains classifiers on two different views and exchanges labels of unlabeled instances in an iterative way. During co-training process, labels of unlabeled instances in the training pool are very likely to be false especially in the initial training rounds, while the standard co-training algorithm utilizes a \u201cdraw without replacement\u201d manner and does not remove these false labeled instances from training. This issue not only tends to degenerate its performance but also hampers its fundamental theory. Besides, there is no optimization model to explain what objective a cotraining process optimizes. To these issues, in this study we design a new co-training algorithm named self-paced cotraining (SPaCo) with a \u201cdraw with replacement\u201d learning mode. The rationality of SPaCo can be proved under theoretical assumptions utilized in traditional co-training research, and furthermore, the algorithm exactly complies with the alternative optimization process for an optimization model of self-paced curriculum learning, which can be finely explained in robust learning manner. 
Experimental results substantiate the superiority of the proposed method as compared with current state-of-the-art co-training methods.", "bibtex": "@InProceedings{pmlr-v70-ma17b,\n title = \t {Self-Paced Co-training},\n author = {Fan Ma and Deyu Meng and Qi Xie and Zina Li and Xuanyi Dong},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2275--2284},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/ma17b/ma17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/ma17b.html},\n abstract = \t {Co-training is a well-known semi-supervised learning approach which trains classifiers on two different views and exchanges labels of unlabeled instances in an iterative way. During co-training process, labels of unlabeled instances in the training pool are very likely to be false especially in the initial training rounds, while the standard co-training algorithm utilizes a \u201cdraw without replacement\u201d manner and does not remove these false labeled instances from training. This issue not only tends to degenerate its performance but also hampers its fundamental theory. Besides, there is no optimization model to explain what objective a cotraining process optimizes. To these issues, in this study we design a new co-training algorithm named self-paced cotraining (SPaCo) with a \u201cdraw with replacement\u201d learning mode. The rationality of SPaCo can be proved under theoretical assumptions utilized in traditional co-training research, and furthermore, the algorithm exactly complies with the alternative optimization process for an optimization model of self-paced curriculum learning, which can be finely explained in robust learning manner. 
Experimental results substantiate the superiority of the proposed method as compared with current state-of-the-art co-training methods.}\n}", "pdf": "http://proceedings.mlr.press/v70/ma17b/ma17b.pdf", "supp": "", "pdf_size": 272089, "gs_citation": 172, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18172417705040589206&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Xi'an Jiaotong University; Xi'an Jiaotong University; Xi'an Jiaotong University; Xi'an Jiaotong University; University of Technology Sydney", "aff_domain": "xjtu.edu.cn; ; ; ; ", "email": "xjtu.edu.cn; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/ma17b.html", "aff_unique_index": "0;0;0;0;1", "aff_unique_norm": "Xi'an Jiao Tong University;University of Technology Sydney", "aff_unique_dep": ";", "aff_unique_url": "https://www.xjtu.edu.cn;https://www.uts.edu.au", "aff_unique_abbr": "XJTU;UTS", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;1", "aff_country_unique": "China;Australia" }, { "title": "Semi-Supervised Classification Based on Classification from Positive and Unlabeled Data", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/603", "id": "603", "author_site": "Tomoya Sakai, Marthinus C du Plessis, Gang Niu, Masashi Sugiyama", "author": "Tomoya Sakai; Marthinus Christoffel Plessis; Gang Niu; Masashi Sugiyama", "abstract": "Most of the semi-supervised classification methods developed so far use unlabeled data for regularization purposes under particular distributional assumptions such as the cluster assumption. In contrast, recently developed methods of classification from positive and unlabeled data (PU classification) use unlabeled data for risk evaluation, i.e., label information is directly extracted from unlabeled data. In this paper, we extend PU classification to also incorporate negative data and propose a novel semi-supervised learning approach. We establish generalization error bounds for our novel methods and show that the bounds decrease with respect to the number of unlabeled data without the distributional assumptions that are required in existing semi-supervised learning methods. Through experiments, we demonstrate the usefulness of the proposed methods.", "bibtex": "@InProceedings{pmlr-v70-sakai17a,\n title = \t {Semi-Supervised Classification Based on Classification from Positive and Unlabeled Data},\n author = {Tomoya Sakai and Marthinus Christoffel du Plessis and Gang Niu and Masashi Sugiyama},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2998--3006},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/sakai17a/sakai17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/sakai17a.html},\n abstract = \t {Most of the semi-supervised classification methods developed so far use unlabeled data for regularization purposes under particular distributional assumptions such as the cluster assumption. In contrast, recently developed methods of classification from positive and unlabeled data (PU classification) use unlabeled data for risk evaluation, i.e., label information is directly extracted from unlabeled data. 
In this paper, we extend PU classification to also incorporate negative data and propose a novel semi-supervised learning approach. We establish generalization error bounds for our novel methods and show that the bounds decrease with respect to the number of unlabeled data without the distributional assumptions that are required in existing semi-supervised learning methods. Through experiments, we demonstrate the usefulness of the proposed methods.}\n}", "pdf": "http://proceedings.mlr.press/v70/sakai17a/sakai17a.pdf", "supp": "", "pdf_size": 212471, "gs_citation": 138, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7653716358950540966&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 9, "aff": "The University of Tokyo, Japan+RIKEN, Japan; The University of Tokyo, Japan; The University of Tokyo, Japan; RIKEN, Japan", "aff_domain": "ms.k.u-tokyo.ac.jp; ; ; ", "email": "ms.k.u-tokyo.ac.jp; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/sakai17a.html", "aff_unique_index": "0+1;0;0;1", "aff_unique_norm": "University of Tokyo;RIKEN", "aff_unique_dep": ";", "aff_unique_url": "https://www.u-tokyo.ac.jp;https://www.riken.jp", "aff_unique_abbr": "UTokyo;RIKEN", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0;0", "aff_country_unique": "Japan" }, { "title": "Sequence Modeling via Segmentations", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/513", "id": "513", "author_site": "Chong Wang, Yining Wang, Po-Sen Huang, Abdelrahman Mohammad, Dengyong Zhou, Li Deng", "author": "Chong Wang; Yining Wang; Po-Sen Huang; Abdelrahman Mohamed; Dengyong Zhou; Li Deng", "abstract": "Segmental structure is a common pattern in many types of sequences such as phrases in human languages. In this paper, we present a probabilistic model for sequences via their segmentations. The probability of a segmented sequence is calculated as the product of the probabilities of all its segments, where each segment is modeled using existing tools such as recurrent neural networks. Since the segmentation of a sequence is usually unknown in advance, we sum over all valid segmentations to obtain the final probability for the sequence. An efficient dynamic programming algorithm is developed for forward and backward computations without resorting to any approximation. We demonstrate our approach on text segmentation and speech recognition tasks. In addition to quantitative results, we also show that our approach can discover meaningful segments in their respective application contexts.", "bibtex": "@InProceedings{pmlr-v70-wang17j,\n title = \t {Sequence Modeling via Segmentations},\n author = {Chong Wang and Yining Wang and Po-Sen Huang and Abdelrahman Mohamed and Dengyong Zhou and Li Deng},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3674--3683},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/wang17j/wang17j.pdf},\n url = \t {https://proceedings.mlr.press/v70/wang17j.html},\n abstract = \t {Segmental structure is a common pattern in many types of sequences such as phrases in human languages. In this paper, we present a probabilistic model for sequences via their segmentations. 
The probability of a segmented sequence is calculated as the product of the probabilities of all its segments, where each segment is modeled using existing tools such as recurrent neural networks. Since the segmentation of a sequence is usually unknown in advance, we sum over all valid segmentations to obtain the final probability for the sequence. An efficient dynamic programming algorithm is developed for forward and backward computations without resorting to any approximation. We demonstrate our approach on text segmentation and speech recognition tasks. In addition to quantitative results, we also show that our approach can discover meaningful segments in their respective application contexts.}\n}", "pdf": "http://proceedings.mlr.press/v70/wang17j/wang17j.pdf", "supp": "", "pdf_size": 1226193, "gs_citation": 50, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16555690786146249297&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Microsoft Research; Carnegie Mellon University; Microsoft Research; Amazon; Microsoft Research; Citadel Securities LLC", "aff_domain": "microsoft.com; ; ; ; ; ", "email": "microsoft.com; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/wang17j.html", "aff_unique_index": "0;1;0;2;0;3", "aff_unique_norm": "Microsoft;Carnegie Mellon University;Amazon;Citadel Securities", "aff_unique_dep": "Microsoft Research;;Amazon.com, Inc.;", "aff_unique_url": "https://www.microsoft.com/en-us/research;https://www.cmu.edu;https://www.amazon.com;https://www.citadel.com", "aff_unique_abbr": "MSR;CMU;Amazon;Citadel", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Sequence Tutor: Conservative Fine-Tuning of Sequence Generation Models with KL-control", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/814", "id": "814", "author_site": "Natasha Jaques, Shixiang Gu, Dzmitry Bahdanau, Jose Miguel Hernandez-Lobato, Richard E Turner, Douglas Eck", "author": "Natasha Jaques; Shixiang Gu; Dzmitry Bahdanau; Jos\u00e9 Miguel Hern\u00e1ndez-Lobato; Richard E. Turner; Douglas Eck", "abstract": "This paper proposes a general method for improving the structure and quality of sequences generated by a recurrent neural network (RNN), while maintaining information originally learned from data, as well as sample diversity. An RNN is first pre-trained on data using maximum likelihood estimation (MLE), and the probability distribution over the next token in the sequence learned by this model is treated as a prior policy. Another RNN is then trained using reinforcement learning (RL) to generate higher-quality outputs that account for domain-specific incentives while retaining proximity to the prior policy of the MLE RNN. To formalize this objective, we derive novel off-policy RL methods for RNNs from KL-control. The effectiveness of the approach is demonstrated on two applications; 1) generating novel musical melodies, and 2) computational molecular generation. 
For both problems, we show that the proposed method improves the desired properties and structure of the generated sequences, while maintaining information learned from data.", "bibtex": "@InProceedings{pmlr-v70-jaques17a,\n title = \t {Sequence Tutor: Conservative Fine-Tuning of Sequence Generation Models with {KL}-control},\n author = {Natasha Jaques and Shixiang Gu and Dzmitry Bahdanau and Jos{\\'e} Miguel Hern{\\'a}ndez-Lobato and Richard E. Turner and Douglas Eck},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1645--1654},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/jaques17a/jaques17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/jaques17a.html},\n abstract = \t {This paper proposes a general method for improving the structure and quality of sequences generated by a recurrent neural network (RNN), while maintaining information originally learned from data, as well as sample diversity. An RNN is first pre-trained on data using maximum likelihood estimation (MLE), and the probability distribution over the next token in the sequence learned by this model is treated as a prior policy. Another RNN is then trained using reinforcement learning (RL) to generate higher-quality outputs that account for domain-specific incentives while retaining proximity to the prior policy of the MLE RNN. To formalize this objective, we derive novel off-policy RL methods for RNNs from KL-control. The effectiveness of the approach is demonstrated on two applications; 1) generating novel musical melodies, and 2) computational molecular generation. 
For both problems, we show that the proposed method improves the desired properties and structure of the generated sequences, while maintaining information learned from data.}\n}", "pdf": "http://proceedings.mlr.press/v70/jaques17a/jaques17a.pdf", "supp": "", "pdf_size": 775063, "gs_citation": 213, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12110286055488959258&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": "Google Brain, Mountain View, USA+Massachusetts Institute of Technology, Cambridge, USA; Google Brain, Mountain View, USA+University of Cambridge, Cambridge, UK+Max Planck Institute for Intelligent Systems, Stuttgart, Germany; Google Brain, Mountain View, USA+Universit\u00e9 de Montr\u00e9al, Montr\u00e9al, Canada; University of Cambridge, Cambridge, UK; University of Cambridge, Cambridge, UK; Google Brain, Mountain View, USA", "aff_domain": "mit.edu; ; ; ; ; ", "email": "mit.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/jaques17a.html", "aff_unique_index": "0+1;0+2+3;0+4;2;2;0", "aff_unique_norm": "Google;Massachusetts Institute of Technology;University of Cambridge;Max Planck Institute for Intelligent Systems;Universit\u00e9 de Montr\u00e9al", "aff_unique_dep": "Google Brain;;;;", "aff_unique_url": "https://brain.google.com;https://web.mit.edu;https://www.cam.ac.uk;https://www.mpi-is.mpg.de;https://www.umontreal.ca", "aff_unique_abbr": "Google Brain;MIT;Cambridge;MPI-IS;UdeM", "aff_campus_unique_index": "0+1;0+1+2;0+3;1;1;0", "aff_campus_unique": "Mountain View;Cambridge;Stuttgart;Montr\u00e9al", "aff_country_unique_index": "0+0;0+1+2;0+3;1;1;0", "aff_country_unique": "United States;United Kingdom;Germany;Canada" }, { "title": "Sequence to Better Sequence: Continuous Revision of Combinatorial Structures", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/623", "id": "623", "author_site": "Jonas Mueller, David Gifford, Tommi Jaakkola", "author": "Jonas Mueller; David Gifford; Tommi Jaakkola", "abstract": "We present a model that, after learning on observations of (sequence, outcome) pairs, can be efficiently used to revise a new sequence in order to improve its associated outcome. Our framework requires neither example improvements, nor additional evaluation of outcomes for proposed revisions. To avoid combinatorial-search over sequence elements, we specify a generative model with continuous latent factors, which is learned via joint approximate inference using a recurrent variational autoencoder (VAE) and an outcome-predicting neural network module. Under this model, gradient methods can be used to efficiently optimize the continuous latent factors with respect to inferred outcomes. By appropriately constraining this optimization and using the VAE decoder to generate a revised sequence, we ensure the revision is fundamentally similar to the original sequence, is associated with better outcomes, and looks natural. 
These desiderata are proven to hold with high probability under our approach, which is empirically demonstrated for revising natural language sentences.", "bibtex": "@InProceedings{pmlr-v70-mueller17a,\n title = \t {Sequence to Better Sequence: Continuous Revision of Combinatorial Structures},\n author = {Jonas Mueller and David Gifford and Tommi Jaakkola},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2536--2544},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/mueller17a/mueller17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/mueller17a.html},\n abstract = \t {We present a model that, after learning on observations of (sequence, outcome) pairs, can be efficiently used to revise a new sequence in order to improve its associated outcome. Our framework requires neither example improvements, nor additional evaluation of outcomes for proposed revisions. To avoid combinatorial-search over sequence elements, we specify a generative model with continuous latent factors, which is learned via joint approximate inference using a recurrent variational autoencoder (VAE) and an outcome-predicting neural network module. Under this model, gradient methods can be used to efficiently optimize the continuous latent factors with respect to inferred outcomes. By appropriately constraining this optimization and using the VAE decoder to generate a revised sequence, we ensure the revision is fundamentally similar to the original sequence, is associated with better outcomes, and looks natural. These desiderata are proven to hold with high probability under our approach, which is empirically demonstrated for revising natural language sentences.}\n}", "pdf": "http://proceedings.mlr.press/v70/mueller17a/mueller17a.pdf", "supp": "", "pdf_size": 1957302, "gs_citation": 124, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4687027610989448483&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "MIT Computer Science & Artificial Intelligence Laboratory; MIT Computer Science & Artificial Intelligence Laboratory; MIT Computer Science & Artificial Intelligence Laboratory", "aff_domain": "csail.mit.edu; ; ", "email": "csail.mit.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/mueller17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Massachusetts Institute of Technology", "aff_unique_dep": "Computer Science & Artificial Intelligence Laboratory", "aff_unique_url": "https://www.csail.mit.edu", "aff_unique_abbr": "MIT CSAIL", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Cambridge", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Sharp Minima Can Generalize For Deep Nets", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/607", "id": "607", "author_site": "Laurent Dinh, Razvan Pascanu, Samy Bengio, Yoshua Bengio", "author": "Laurent Dinh; Razvan Pascanu; Samy Bengio; Yoshua Bengio", "abstract": "Despite their overwhelming capacity to overfit, deep learning architectures tend to generalize relatively well to unseen data, allowing them to be deployed in practice. However, explaining why this is the case is still an open area of research. 
One standing hypothesis that is gaining popularity, e.g.\\ Hochreiter \\& Schmidhuber (1997); Keskar et al.\\ (2017), is that the flatness of minima of the loss function found by stochastic gradient based methods results in good generalization. This paper argues that most notions of flatness are problematic for deep models and can not be directly applied to explain generalization. Specifically, when focusing on deep networks with rectifier units, we can exploit the particular geometry of parameter space induced by the inherent symmetries that these architectures exhibit to build equivalent models corresponding to arbitrarily sharper minima. Or, depending on the definition of flatness, it is the same for any given minimum. Furthermore, if we allow to reparametrize a function, the geometry of its parameters can change drastically without affecting its generalization properties.", "bibtex": "@InProceedings{pmlr-v70-dinh17b,\n title = \t {Sharp Minima Can Generalize For Deep Nets},\n author = {Laurent Dinh and Razvan Pascanu and Samy Bengio and Yoshua Bengio},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1019--1028},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/dinh17b/dinh17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/dinh17b.html},\n abstract = \t {Despite their overwhelming capacity to overfit, deep learning architectures tend to generalize relatively well to unseen data, allowing them to be deployed in practice. However, explaining why this is the case is still an open area of research. One standing hypothesis that is gaining popularity, e.g.\\ Hochreiter \\& Schmidhuber (1997); Keskar et al.\\ (2017), is that the flatness of minima of the loss function found by stochastic gradient based methods results in good generalization. This paper argues that most notions of flatness are problematic for deep models and can not be directly applied to explain generalization. Specifically, when focusing on deep networks with rectifier units, we can exploit the particular geometry of parameter space induced by the inherent symmetries that these architectures exhibit to build equivalent models corresponding to arbitrarily sharper minima. Or, depending on the definition of flatness, it is the same for any given minimum. 
Furthermore, if we allow to reparametrize a function, the geometry of its parameters can change drastically without affecting its generalization properties.}\n}", "pdf": "http://proceedings.mlr.press/v70/dinh17b/dinh17b.pdf", "supp": "", "pdf_size": 1496159, "gs_citation": 893, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4474448870091274183&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "Universit\u00e9 of Montr\u00e9al, Montr\u00e9al, Canada; DeepMind, London, United Kingdom; Google Brain, Mountain View, United States; CIFAR Senior Fellow", "aff_domain": "umontreal.ca; ; ; ", "email": "umontreal.ca; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/dinh17b.html", "aff_unique_index": "0;1;2;3", "aff_unique_norm": "Universit\u00e9 de Montr\u00e9al;DeepMind;Google;CIFAR", "aff_unique_dep": ";;Google Brain;Senior Fellow", "aff_unique_url": "https://www.umontreal.ca;https://deepmind.com;https://brain.google.com;https://www.cifar.ca", "aff_unique_abbr": "UdeM;DeepMind;Google Brain;CIFAR", "aff_campus_unique_index": "0;1;2", "aff_campus_unique": "Montr\u00e9al;London;Mountain View;", "aff_country_unique_index": "0;1;2;0", "aff_country_unique": "Canada;United Kingdom;United States" }, { "title": "Simultaneous Learning of Trees and Representations for Extreme Classification and Density Estimation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/782", "id": "782", "author_site": "Yacine Jernite, Anna Choromanska, David Sontag", "author": "Yacine Jernite; Anna Choromanska; David Sontag", "abstract": "We consider multi-class classification where the predictor has a hierarchical structure that allows for a very large number of labels both at train and test time. The predictive power of such models can heavily depend on the structure of the tree, and although past work showed how to learn the tree structure, it expected that the feature vectors remained static. We provide a novel algorithm to simultaneously perform representation learning for the input data and learning of the hierarchical predictor. Our approach optimizes an objective function which favors balanced and easily-separable multi-way node partitions. We theoretically analyze this objective, showing that it gives rise to a boosting style property and a bound on classification error. We next show how to extend the algorithm to conditional density estimation. We empirically validate both variants of the algorithm on text classification and language modeling, respectively, and show that they compare favorably to common baselines in terms of accuracy and running time.", "bibtex": "@InProceedings{pmlr-v70-jernite17a,\n title = \t {Simultaneous Learning of Trees and Representations for Extreme Classification and Density Estimation},\n author = {Yacine Jernite and Anna Choromanska and David Sontag},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1665--1674},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/jernite17a/jernite17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/jernite17a.html},\n abstract = \t {We consider multi-class classification where the predictor has a hierarchical structure that allows for a very large number of labels both at train and test time. 
The predictive power of such models can heavily depend on the structure of the tree, and although past work showed how to learn the tree structure, it expected that the feature vectors remained static. We provide a novel algorithm to simultaneously perform representation learning for the input data and learning of the hierarchical predictor. Our approach optimizes an objective function which favors balanced and easily-separable multi-way node partitions. We theoretically analyze this objective, showing that it gives rise to a boosting style property and a bound on classification error. We next show how to extend the algorithm to conditional density estimation. We empirically validate both variants of the algorithm on text classification and language modeling, respectively, and show that they compare favorably to common baselines in terms of accuracy and running time.}\n}", "pdf": "http://proceedings.mlr.press/v70/jernite17a/jernite17a.pdf", "supp": "", "pdf_size": 362593, "gs_citation": 42, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6143734481706812471&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "New York University, New York, New York, USA; New York University, New York, New York, USA; Massachusetts Institute of Technology, Cambridge, Massachusetts, USA", "aff_domain": "cs.nyu.edu; ; ", "email": "cs.nyu.edu; ; ", "github": "https://github.com/yjernite/fastTextLearnTree", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/jernite17a.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "New York University;Massachusetts Institute of Technology", "aff_unique_dep": ";", "aff_unique_url": "https://www.nyu.edu;https://www.mit.edu", "aff_unique_abbr": "NYU;MIT", "aff_campus_unique_index": "0;0;1", "aff_campus_unique": "New York;Cambridge", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Sketched Ridge Regression: Optimization Perspective, Statistical Perspective, and Model Averaging", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/486", "id": "486", "author_site": "Shusen Wang, Alex Gittens, Michael Mahoney", "author": "Shusen Wang; Alex Gittens; Michael W. Mahoney", "abstract": "We address the statistical and optimization impacts of using classical sketch versus Hessian sketch to solve approximately the Matrix Ridge Regression (MRR) problem. Prior research has considered the effects of classical sketch on least squares regression (LSR), a strictly simpler problem. We establish that classical sketch has a similar effect upon the optimization properties of MRR as it does on those of LSR\u2014namely, it recovers nearly optimal solutions. In contrast, Hessian sketch does not have this guarantee; instead, the approximation error is governed by a subtle interplay between the \u201cmass\u201d in the responses and the optimal objective value. For both types of approximations, the regularization in the sketched MRR problem gives it significantly different statistical properties from the sketched LSR problem. In particular, there is a bias-variance trade-off in sketched MRR that is not present in sketched LSR. We provide upper and lower bounds on the biases and variances of sketched MRR; these establish that the variance is significantly increased when classical sketches are used, while the bias is significantly increased when using Hessian sketches. 
Empirically, sketched MRR solutions can have risks that are higher by an order-of-magnitude than those of the optimal MRR solutions. We establish theoretically and empirically that model averaging greatly decreases this gap. Thus, in the distributed setting, sketching combined with model averaging is a powerful technique that quickly obtains near-optimal solutions to the MRR problem while greatly mitigating the statistical risks incurred by sketching.", "bibtex": "@InProceedings{pmlr-v70-wang17c,\n title = \t {Sketched Ridge Regression: Optimization Perspective, Statistical Perspective, and Model Averaging},\n author = {Shusen Wang and Alex Gittens and Michael W. Mahoney},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3608--3616},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/wang17c/wang17c.pdf},\n url = \t {https://proceedings.mlr.press/v70/wang17c.html},\n abstract = \t {We address the statistical and optimization impacts of using classical sketch versus Hessian sketch to solve approximately the Matrix Ridge Regression (MRR) problem. Prior research has considered the effects of classical sketch on least squares regression (LSR), a strictly simpler problem. We establish that classical sketch has a similar effect upon the optimization properties of MRR as it does on those of LSR\u2014namely, it recovers nearly optimal solutions. In contrast, Hessian sketch does not have this guarantee; instead, the approximation error is governed by a subtle interplay between the \u201cmass\u201d in the responses and the optimal objective value. For both types of approximations, the regularization in the sketched MRR problem gives it significantly different statistical properties from the sketched LSR problem. In particular, there is a bias-variance trade-off in sketched MRR that is not present in sketched LSR. We provide upper and lower bounds on the biases and variances of sketched MRR; these establish that the variance is significantly increased when classical sketches are used, while the bias is significantly increased when using Hessian sketches. Empirically, sketched MRR solutions can have risks that are higher by an order-of-magnitude than those of the optimal MRR solutions. We establish theoretically and empirically that model averaging greatly decreases this gap. 
Thus, in the distributed setting, sketching combined with model averaging is a powerful technique that quickly obtains near-optimal solutions to the MRR problem while greatly mitigating the statistical risks incurred by sketching.}\n}", "pdf": "http://proceedings.mlr.press/v70/wang17c/wang17c.pdf", "supp": "", "pdf_size": 598024, "gs_citation": 109, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4273538981090752913&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": "International Computer Science Institute and Department of Statistics, University of California at Berkeley, USA; Department of Computer Science, Rensselaer Polytechnic Institute, USA; International Computer Science Institute and Department of Statistics, University of California at Berkeley, USA", "aff_domain": "berkeley.edu;rpi.edu;stat.berkeley.edu", "email": "berkeley.edu;rpi.edu;stat.berkeley.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/wang17c.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "University of California, Berkeley;Rensselaer Polytechnic Institute", "aff_unique_dep": "Department of Statistics;Department of Computer Science", "aff_unique_url": "https://www.berkeley.edu;https://www.rpi.edu", "aff_unique_abbr": "UC Berkeley;RPI", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Sliced Wasserstein Kernel for Persistence Diagrams", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/524", "id": "524", "author_site": "Mathieu Carri\u00e8re, Marco Cuturi, Steve Oudot", "author": "Mathieu Carri\u00e8re; Marco Cuturi; Steve Oudot", "abstract": "Persistence diagrams (PDs) play a key role in topological data analysis (TDA), in which they are routinely used to describe succinctly complex topological properties of complicated shapes. PDs enjoy strong stability properties and have proven their utility in various learning contexts. They do not, however, live in a space naturally endowed with a Hilbert structure and are usually compared with specific distances, such as the bottleneck distance. To incorporate PDs in a learning pipeline, several kernels have been proposed for PDs with a strong emphasis on the stability of the RKHS distance w.r.t. perturbations of the PDs. In this article, we use the Sliced Wasserstein approximation of the Wasserstein distance to define a new kernel for PDs, which is not only provably stable but also provably discriminative w.r.t. the Wasserstein distance $W^1_\\infty$ between PDs. 
We also demonstrate its practicality, by developing an approximation technique to reduce kernel computation time, and show that our proposal compares favorably to existing kernels for PDs on several benchmarks.", "bibtex": "@InProceedings{pmlr-v70-carriere17a,\n title = \t {Sliced {W}asserstein Kernel for Persistence Diagrams},\n author = {Mathieu Carri{\\`e}re and Marco Cuturi and Steve Oudot},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {664--673},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/carriere17a/carriere17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/carriere17a.html},\n abstract = \t {Persistence diagrams (PDs) play a key role in topological data analysis (TDA), in which they are routinely used to describe succinctly complex topological properties of complicated shapes. PDs enjoy strong stability properties and have proven their utility in various learning contexts. They do not, however, live in a space naturally endowed with a Hilbert structure and are usually compared with specific distances, such as the bottleneck distance. To incorporate PDs in a learning pipeline, several kernels have been proposed for PDs with a strong emphasis on the stability of the RKHS distance w.r.t. perturbations of the PDs. In this article, we use the Sliced Wasserstein approximation of the Wasserstein distance to define a new kernel for PDs, which is not only provably stable but also provably discriminative w.r.t. the Wasserstein distance $W^1_\\infty$ between PDs. We also demonstrate its practicality, by developing an approximation technique to reduce kernel computation time, and show that our proposal compares favorably to existing kernels for PDs on several benchmarks.}\n}", "pdf": "http://proceedings.mlr.press/v70/carriere17a/carriere17a.pdf", "supp": "", "pdf_size": 1689185, "gs_citation": 323, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5424979481875749393&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 18, "aff": "INRIA Saclay; CREST, ENSAE, Universit\u00e9 Paris-Saclay; INRIA Saclay", "aff_domain": "inria.fr; ; ", "email": "inria.fr; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/carriere17a.html", "aff_unique_index": "0;1;0", "aff_unique_norm": "INRIA;CREST", "aff_unique_dep": ";", "aff_unique_url": "https://www.inria.fr;", "aff_unique_abbr": "INRIA;", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Saclay;", "aff_country_unique_index": "0;0;0", "aff_country_unique": "France" }, { "title": "Soft-DTW: a Differentiable Loss Function for Time-Series", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/769", "id": "769", "author_site": "Marco Cuturi, Mathieu Blondel", "author": "Marco Cuturi; Mathieu Blondel", "abstract": "We propose in this paper a differentiable learning loss between time series, building upon the celebrated dynamic time warping (DTW) discrepancy. Unlike the Euclidean distance, DTW can compare time series of variable size and is robust to shifts or dilatations across the time dimension. To compute DTW, one typically solves a minimal-cost alignment problem between two time series using dynamic programming. 
Our work takes advantage of a smoothed formulation of DTW, called soft-DTW, that computes the soft-minimum of all alignment costs. We show in this paper that soft-DTW is a", "bibtex": "@InProceedings{pmlr-v70-cuturi17a,\n title = \t {Soft-{DTW}: a Differentiable Loss Function for Time-Series},\n author = {Marco Cuturi and Mathieu Blondel},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {894--903},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/cuturi17a/cuturi17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/cuturi17a.html},\n abstract = \t {We propose in this paper a differentiable learning loss between time series, building upon the celebrated dynamic time warping (DTW) discrepancy. Unlike the Euclidean distance, DTW can compare time series of variable size and is robust to shifts or dilatations across the time dimension. To compute DTW, one typically solves a minimal-cost alignment problem between two time series using dynamic programming. Our work takes advantage of a smoothed formulation of DTW, called soft-DTW, that computes the soft-minimum of all alignment costs. We show in this paper that soft-DTW is a", "pdf": "http://proceedings.mlr.press/v70/cuturi17a/cuturi17a.pdf", "supp": "", "pdf_size": 617844, "gs_citation": 956, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17321643836550070491&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "CREST, ENSAE, Universit\u00e9 Paris-Saclay, France; NTT Communication Science Laboratories, Seika-cho, Kyoto, Japan", "aff_domain": "ensae.fr;mblondel.org", "email": "ensae.fr;mblondel.org", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/cuturi17a.html", "aff_unique_index": "0;1", "aff_unique_norm": "Universit\u00e9 Paris-Saclay;NTT Communication Science Laboratories", "aff_unique_dep": "CREST, ENSAE;", "aff_unique_url": "https://www.universite-paris-saclay.fr;https://www.ntt-csl.com", "aff_unique_abbr": ";NTT CSL", "aff_campus_unique_index": "1", "aff_campus_unique": ";Kyoto", "aff_country_unique_index": "0;1", "aff_country_unique": "France;Japan" }, { "title": "Source-Target Similarity Modelings for Multi-Source Transfer Gaussian Process Regression", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/679", "id": "679", "author_site": "PENGFEI WEI, Ramon Sagarna, Yiping Ke, Yew Soon ONG, CHI GOH", "author": "Pengfei Wei; Ramon Sagarna; Yiping Ke; Yew-Soon Ong; Chi-Keong Goh", "abstract": "A key challenge in multi-source transfer learning is to capture the diverse inter-domain similarities. In this paper, we study different approaches based on Gaussian process models to solve the multi-source transfer regression problem. Precisely, we first investigate the feasibility and performance of a family of transfer covariance functions that represent the pairwise similarity of each source and the target domain. We theoretically show that using such a transfer covariance function for general Gaussian process modelling can only capture the same similarity coefficient for all the sources, and thus may result in unsatisfactory transfer performance.
This leads us to propose", "bibtex": "@InProceedings{pmlr-v70-wei17a,\n title = \t {Source-Target Similarity Modelings for Multi-Source Transfer {G}aussian Process Regression},\n author = {Pengfei Wei and Ramon Sagarna and Yiping Ke and Yew-Soon Ong and Chi-Keong Goh},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3722--3731},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/wei17a/wei17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/wei17a.html},\n abstract = \t {A key challenge in multi-source transfer learning is to capture the diverse inter-domain similarities. In this paper, we study different approaches based on Gaussian process models to solve the multi-source transfer regression problem. Precisely, we first investigate the feasibility and performance of a family of transfer covariance functions that represent the pairwise similarity of each source and the target domain. We theoretically show that using such a transfer covariance function for general Gaussian process modelling can only capture the same similarity coefficient for all the sources, and thus may result in unsatisfactory transfer performance. This leads us to propose", "pdf": "http://proceedings.mlr.press/v70/wei17a/wei17a.pdf", "supp": "", "pdf_size": 1939310, "gs_citation": 38, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6421034860082180336&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "School of Computer Science and Engineering, Nanyang Technological University, Singapore+Rolls-Royce@Nanyang Technological University Corporate Lab; School of Computer Science and Engineering, Nanyang Technological University, Singapore+Rolls-Royce@Nanyang Technological University Corporate Lab; School of Computer Science and Engineering, Nanyang Technological University, Singapore+Rolls-Royce@Nanyang Technological University Corporate Lab; School of Computer Science and Engineering, Nanyang Technological University, Singapore+Rolls-Royce@Nanyang Technological University Corporate Lab; Rolls-Royce Advanced Technology Centre, Singapore+Rolls-Royce@Nanyang Technological University Corporate Lab", "aff_domain": "e.ntu.edu.sg;ntu.edu.sg; ; ; ", "email": "e.ntu.edu.sg;ntu.edu.sg; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/wei17a.html", "aff_unique_index": "0+0;0+0;0+0;0+0;1+0", "aff_unique_norm": "Nanyang Technological University;Rolls-Royce", "aff_unique_dep": "School of Computer Science and Engineering;Advanced Technology Centre", "aff_unique_url": "https://www.ntu.edu.sg;https://www.rolls-royce.com", "aff_unique_abbr": "NTU;", "aff_campus_unique_index": "0;0;0;0;", "aff_campus_unique": "Singapore;", "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0", "aff_country_unique": "Singapore" }, { "title": "Sparse + Group-Sparse Dirty Models: Statistical Guarantees without Unreasonable Conditions and a Case for Non-Convexity", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/647", "id": "647", "author_site": "Eunho Yang, Aurelie Lozano", "author": "Eunho Yang; Aur\u00e9lie C. 
Lozano", "abstract": "Imposing sparse + group-sparse superposition structures in high-dimensional parameter estimation is known to provide flexible regularization that is more realistic for many real-world problems. For example, such a superposition enables partially-shared support sets in multi-task learning, thereby striking the right balance between parameter overlap across tasks and task specificity. Existing theoretical results on estimation consistency, however, are problematic as they require too stringent an assumption: the incoherence between sparse and group-sparse superposed components. In this paper, we fill the gap between the practical success and suboptimal analysis of sparse + group-sparse models, by providing the first consistency results that do not require unrealistic assumptions. We also study non-convex counterparts of sparse + group-sparse models. Interestingly, we show that these are guaranteed to recover the true support set under much milder conditions and with smaller sample size than convex models, which might be critical in practical applications as illustrated by our experiments.", "bibtex": "@InProceedings{pmlr-v70-yang17g,\n title = \t {Sparse + Group-Sparse Dirty Models: Statistical Guarantees without Unreasonable Conditions and a Case for Non-Convexity},\n author = {Eunho Yang and Aur{\\'e}lie C. Lozano},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3911--3920},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/yang17g/yang17g.pdf},\n url = \t {https://proceedings.mlr.press/v70/yang17g.html},\n abstract = \t {Imposing sparse + group-sparse superposition structures in high-dimensional parameter estimation is known to provide flexible regularization that is more realistic for many real-world problems. For example, such a superposition enables partially-shared support sets in multi-task learning, thereby striking the right balance between parameter overlap across tasks and task specificity. Existing theoretical results on estimation consistency, however, are problematic as they require too stringent an assumption: the incoherence between sparse and group-sparse superposed components. In this paper, we fill the gap between the practical success and suboptimal analysis of sparse + group-sparse models, by providing the first consistency results that do not require unrealistic assumptions. We also study non-convex counterparts of sparse + group-sparse models. Interestingly, we show that these are guaranteed to recover the true support set under much milder conditions and with smaller sample size than convex models, which might be critical in practical applications as illustrated by our experiments.}\n}", "pdf": "http://proceedings.mlr.press/v70/yang17g/yang17g.pdf", "supp": "", "pdf_size": 629395, "gs_citation": 5, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8348735422711770743&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "School of Computing, KAIST, Daejeon, South Korea+AItrics, Seoul, South Korea; IBM T.J. 
Watson Research Center, Yorktown Heights, NY, USA", "aff_domain": "kaist.ac.kr;us.ibm.com", "email": "kaist.ac.kr;us.ibm.com", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/yang17g.html", "aff_unique_index": "0+1;2", "aff_unique_norm": "KAIST;AITRICS;IBM", "aff_unique_dep": "School of Computing;;IBM T.J. Watson Research Center", "aff_unique_url": "https://www.kaist.ac.kr;;https://www.ibm.com/research/watson", "aff_unique_abbr": "KAIST;;IBM Watson", "aff_campus_unique_index": "0+1;2", "aff_campus_unique": "Daejeon;Seoul;Yorktown Heights", "aff_country_unique_index": "0+0;1", "aff_country_unique": "South Korea;United States" }, { "title": "Spectral Learning from a Single Trajectory under Finite-State Policies", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/820", "id": "820", "author_site": "Borja de Balle Pigem, Odalric Maillard", "author": "Borja Balle; Odalric-Ambrym Maillard", "abstract": "We present spectral methods of moments for learning sequential models from a single trajectory, in stark contrast with the classical literature that assumes the availability of multiple i.i.d. trajectories. Our approach leverages an efficient SVD-based learning algorithm for weighted automata and provides the first rigorous analysis for learning many important models using dependent data. We state and analyze the algorithm under three increasingly difficult scenarios: probabilistic automata, stochastic weighted automata, and reactive predictive state representations controlled by a finite-state policy. Our proofs include novel tools for studying mixing properties of stochastic weighted automata.", "bibtex": "@InProceedings{pmlr-v70-balle17a,\n title = \t {Spectral Learning from a Single Trajectory under Finite-State Policies},\n author = {Borja Balle and Odalric-Ambrym Maillard},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {361--370},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/balle17a/balle17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/balle17a.html},\n abstract = \t {We present spectral methods of moments for learning sequential models from a single trajectory, in stark contrast with the classical literature that assumes the availability of multiple i.i.d. trajectories. Our approach leverages an efficient SVD-based learning algorithm for weighted automata and provides the first rigorous analysis for learning many important models using dependent data. We state and analyze the algorithm under three increasingly difficult scenarios: probabilistic automata, stochastic weighted automata, and reactive predictive state representations controlled by a finite-state policy. 
Our proofs include novel tools for studying mixing properties of stochastic weighted automata.}\n}", "pdf": "http://proceedings.mlr.press/v70/balle17a/balle17a.pdf", "supp": "", "pdf_size": 359936, "gs_citation": 6, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4571861091034755617&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Amazon Research, Cambridge, UK (work done at Lancaster University); Inria Lille - Nord Europe, Villeneuve d\u2019Ascq, France", "aff_domain": "amazon.co.uk;inria.fr", "email": "amazon.co.uk;inria.fr", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/balle17a.html", "aff_unique_index": "0;1", "aff_unique_norm": "Amazon;INRIA", "aff_unique_dep": "Amazon Research;", "aff_unique_url": "https://www.amazon.science;https://www.inria.fr", "aff_unique_abbr": "Amazon;Inria", "aff_campus_unique_index": "0;1", "aff_campus_unique": "Cambridge;Lille - Nord Europe", "aff_country_unique_index": "0;1", "aff_country_unique": "United Kingdom;France" }, { "title": "Spherical Structured Feature Maps for Kernel Approximation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/537", "id": "537", "author_site": "Yueming LYU", "author": "Yueming Lyu", "abstract": "We propose Spherical Structured Feature (SSF) maps to approximate shift and rotation invariant kernels as well as $b^{th}$-order arc-cosine kernels (Cho \\& Saul, 2009). We construct SSF maps based on the point set on $d-1$ dimensional sphere $\\mathbb{S}^{d-1}$. We prove that the inner product of SSF maps are unbiased estimates for above kernels if asymptotically uniformly distributed point set on $\\mathbb{S}^{d-1}$ is given. According to (Brauchart \\& Grabner, 2015), optimizing the discrete Riesz s-energy can generate asymptotically uniformly distributed point set on $\\mathbb{S}^{d-1}$. Thus, we propose an efficient coordinate descent method to find a local optimum of the discrete Riesz s-energy for SSF maps construction. Theoretically, SSF maps construction achieves linear space complexity and loglinear time complexity. Empirically, SSF maps achieve superior performance compared with other methods.", "bibtex": "@InProceedings{pmlr-v70-lyu17a,\n title = \t {Spherical Structured Feature Maps for Kernel Approximation},\n author = {Yueming Lyu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2256--2264},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/lyu17a/lyu17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/lyu17a.html},\n abstract = \t {We propose Spherical Structured Feature (SSF) maps to approximate shift and rotation invariant kernels as well as $b^{th}$-order arc-cosine kernels (Cho \\& Saul, 2009). We construct SSF maps based on the point set on $d-1$ dimensional sphere $\\mathbb{S}^{d-1}$. We prove that the inner product of SSF maps are unbiased estimates for above kernels if asymptotically uniformly distributed point set on $\\mathbb{S}^{d-1}$ is given. According to (Brauchart \\& Grabner, 2015), optimizing the discrete Riesz s-energy can generate asymptotically uniformly distributed point set on $\\mathbb{S}^{d-1}$. Thus, we propose an efficient coordinate descent method to find a local optimum of the discrete Riesz s-energy for SSF maps construction.
Theoretically, SSF maps construction achieves linear space complexity and loglinear time complexity. Empirically, SSF maps achieve superior performance compared with other methods.}\n}", "pdf": "http://proceedings.mlr.press/v70/lyu17a/lyu17a.pdf", "supp": "", "pdf_size": 780252, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8240152760328644355&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Department of Computer Science, City University of Hong Kong", "aff_domain": "outlook.com", "email": "outlook.com", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v70/lyu17a.html", "aff_unique_index": "0", "aff_unique_norm": "City University of Hong Kong", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.cityu.edu.hk", "aff_unique_abbr": "CityU", "aff_campus_unique_index": "0", "aff_campus_unique": "Hong Kong SAR", "aff_country_unique_index": "0", "aff_country_unique": "China" }, { "title": "SplitNet: Learning to Semantically Split Deep Networks for Parameter Reduction and Model Parallelization", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/490", "id": "490", "author_site": "Juyong Kim, Yookoon Park, Gunhee Kim, Sung Ju Hwang", "author": "Juyong Kim; Yookoon Park; Gunhee Kim; Sung Ju Hwang", "abstract": "We propose a novel deep neural network that is both lightweight and effectively structured for model parallelization. Our network, which we name as SplitNet, automatically learns to split the network weights into either a set or a hierarchy of multiple groups that use disjoint sets of features, by learning both the class-to-group and feature-to-group assignment matrices along with the network weights. This produces a tree-structured network that involves no connection between branched subtrees of semantically disparate class groups. SplitNet thus greatly reduces the number of parameters and requires significantly less computations, and is also embarrassingly model parallelizable at test time, since the network evaluation for each subnetwork is completely independent except for the shared lower layer weights that can be duplicated over multiple processors. We validate our method with two deep network models (ResNet and AlexNet) on two different datasets (CIFAR-100 and ILSVRC 2012) for image classification, on which our method obtains networks with significantly reduced number of parameters while achieving comparable or superior classification accuracies over original full deep networks, and accelerated test speed with multiple GPUs.", "bibtex": "@InProceedings{pmlr-v70-kim17b,\n title = \t {{S}plit{N}et: Learning to Semantically Split Deep Networks for Parameter Reduction and Model Parallelization},\n author = {Juyong Kim and Yookoon Park and Gunhee Kim and Sung Ju Hwang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1866--1874},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/kim17b/kim17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/kim17b.html},\n abstract = \t {We propose a novel deep neural network that is both lightweight and effectively structured for model parallelization. 
Our network, which we name as SplitNet, automatically learns to split the network weights into either a set or a hierarchy of multiple groups that use disjoint sets of features, by learning both the class-to-group and feature-to-group assignment matrices along with the network weights. This produces a tree-structured network that involves no connection between branched subtrees of semantically disparate class groups. SplitNet thus greatly reduces the number of parameters and requires significantly less computations, and is also embarrassingly model parallelizable at test time, since the network evaluation for each subnetwork is completely independent except for the shared lower layer weights that can be duplicated over multiple processors. We validate our method with two deep network models (ResNet and AlexNet) on two different datasets (CIFAR-100 and ILSVRC 2012) for image classification, on which our method obtains networks with significantly reduced number of parameters while achieving comparable or superior classification accuracies over original full deep networks, and accelerated test speed with multiple GPUs.}\n}", "pdf": "http://proceedings.mlr.press/v70/kim17b/kim17b.pdf", "supp": "", "pdf_size": 860721, "gs_citation": 101, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3337586753754892728&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Seoul National University; Seoul National University; Seoul National University; UNIST+AITrics", "aff_domain": "snu.ac.kr;snu.ac.kr;snu.ac.kr;unist.ac.kr", "email": "snu.ac.kr;snu.ac.kr;snu.ac.kr;unist.ac.kr", "github": "", "project": "http://vision.snu.ac.kr/projects/splitnet", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/kim17b.html", "aff_unique_index": "0;0;0;1+2", "aff_unique_norm": "Seoul National University;Ulsan National Institute of Science and Technology;AITRICS", "aff_unique_dep": ";;", "aff_unique_url": "https://www.snu.ac.kr;https://www.unist.ac.kr;https://www.aitrics.com", "aff_unique_abbr": "SNU;UNIST;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0+0", "aff_country_unique": "South Korea" }, { "title": "Stabilising Experience Replay for Deep Multi-Agent Reinforcement Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/778", "id": "778", "author_site": "Jakob Foerster, Nantas Nardelli, Gregory Farquhar, Triantafyllos Afouras, Phil Torr, Pushmeet Kohli, Shimon Whiteson", "author": "Jakob Foerster; Nantas Nardelli; Gregory Farquhar; Triantafyllos Afouras; Philip H. S. Torr; Pushmeet Kohli; Shimon Whiteson", "abstract": "Many real-world problems, such as network packet routing and urban traffic control, are naturally modeled as multi-agent reinforcement learning (RL) problems. However, existing multi-agent RL methods typically scale poorly in the problem size. Therefore, a key challenge is to translate the success of deep learning on single-agent RL to the multi-agent setting. A major stumbling block is that independent Q-learning, the most popular multi-agent RL method, introduces nonstationarity that makes it incompatible with the experience replay memory on which deep Q-learning relies. This paper proposes two methods that address this problem: 1) using a multi-agent variant of importance sampling to naturally decay obsolete data and 2) conditioning each agent\u2019s value function on a fingerprint that disambiguates the age of the data sampled from the replay memory. 
Results on a challenging decentralised variant of StarCraft unit micromanagement confirm that these methods enable the successful combination of experience replay with multi-agent RL.", "bibtex": "@InProceedings{pmlr-v70-foerster17b,\n title = \t {Stabilising Experience Replay for Deep Multi-Agent Reinforcement Learning},\n author = {Jakob Foerster and Nantas Nardelli and Gregory Farquhar and Triantafyllos Afouras and Philip H. S. Torr and Pushmeet Kohli and Shimon Whiteson},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1146--1155},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/foerster17b/foerster17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/foerster17b.html},\n abstract = \t {Many real-world problems, such as network packet routing and urban traffic control, are naturally modeled as multi-agent reinforcement learning (RL) problems. However, existing multi-agent RL methods typically scale poorly in the problem size. Therefore, a key challenge is to translate the success of deep learning on single-agent RL to the multi-agent setting. A major stumbling block is that independent Q-learning, the most popular multi-agent RL method, introduces nonstationarity that makes it incompatible with the experience replay memory on which deep Q-learning relies. This paper proposes two methods that address this problem: 1) using a multi-agent variant of importance sampling to naturally decay obsolete data and 2) conditioning each agent\u2019s value function on a fingerprint that disambiguates the age of the data sampled from the replay memory. Results on a challenging decentralised variant of StarCraft unit micromanagement confirm that these methods enable the successful combination of experience replay with multi-agent RL.}\n}", "pdf": "http://proceedings.mlr.press/v70/foerster17b/foerster17b.pdf", "supp": "", "pdf_size": 1070865, "gs_citation": 819, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16652030977272114047&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "University of Oxford; University of Oxford; University of Oxford; University of Oxford; University of Oxford; Microsoft Research; University of Oxford", "aff_domain": "cs.ox.ac.uk;robots.ox.ac.uk; ; ; ; ; ", "email": "cs.ox.ac.uk;robots.ox.ac.uk; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v70/foerster17b.html", "aff_unique_index": "0;0;0;0;0;1;0", "aff_unique_norm": "University of Oxford;Microsoft", "aff_unique_dep": ";Microsoft Research", "aff_unique_url": "https://www.ox.ac.uk;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "Oxford;MSR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0;1;0", "aff_country_unique": "United Kingdom;United States" }, { "title": "State-Frequency Memory Recurrent Neural Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/515", "id": "515", "author_site": "Hao Hu, Guo-Jun Qi", "author": "Hao Hu; Guo-Jun Qi", "abstract": "Modeling temporal sequences plays a fundamental role in various modern applications and has drawn more and more attentions in the machine learning community. 
Among those efforts on improving the capability to represent temporal data, the Long Short-Term Memory (LSTM) has achieved great success in many areas. Although the LSTM can capture long-range dependency in the time domain, it does not explicitly model the pattern occurrences in the frequency domain that plays an important role in tracking and predicting data points over various time cycles. We propose the State-Frequency Memory (SFM), a novel recurrent architecture that allows to separate dynamic patterns across different frequency components and their impacts on modeling the temporal contexts of input sequences. By jointly decomposing memorized dynamics into state-frequency components, the SFM is able to offer a fine-grained analysis of temporal sequences by capturing the dependency of uncovered patterns in both time and frequency domains. Evaluations on several temporal modeling tasks demonstrate the SFM can yield competitive performances, in particular as compared with the state-of-the-art LSTM models.", "bibtex": "@InProceedings{pmlr-v70-hu17c,\n title = \t {State-Frequency Memory Recurrent Neural Networks},\n author = {Hao Hu and Guo-Jun Qi},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1568--1577},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/hu17c/hu17c.pdf},\n url = \t {https://proceedings.mlr.press/v70/hu17c.html},\n abstract = \t {Modeling temporal sequences plays a fundamental role in various modern applications and has drawn more and more attentions in the machine learning community. Among those efforts on improving the capability to represent temporal data, the Long Short-Term Memory (LSTM) has achieved great success in many areas. Although the LSTM can capture long-range dependency in the time domain, it does not explicitly model the pattern occurrences in the frequency domain that plays an important role in tracking and predicting data points over various time cycles. We propose the State-Frequency Memory (SFM), a novel recurrent architecture that allows to separate dynamic patterns across different frequency components and their impacts on modeling the temporal contexts of input sequences. By jointly decomposing memorized dynamics into state-frequency components, the SFM is able to offer a fine-grained analysis of temporal sequences by capturing the dependency of uncovered patterns in both time and frequency domains. 
Evaluations on several temporal modeling tasks demonstrate the SFM can yield competitive performances, in particular as compared with the state-of-the-art LSTM models.}\n}", "pdf": "http://proceedings.mlr.press/v70/hu17c/hu17c.pdf", "supp": "", "pdf_size": 859802, "gs_citation": 64, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9891647606524316014&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "University of Central Florida, Orlando, FL, USA; University of Central Florida, Orlando, FL, USA", "aff_domain": "ucf.edu;ucf.edu", "email": "ucf.edu;ucf.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/hu17c.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Central Florida", "aff_unique_dep": "", "aff_unique_url": "https://www.ucf.edu", "aff_unique_abbr": "UCF", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Orlando", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Statistical Inference for Incomplete Ranking Data: The Case of Rank-Dependent Coarsening", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/590", "id": "590", "author_site": "Mohsen Ahmadi Fahandar, Eyke H\u00fcllermeier, Ines Couso", "author": "Mohsen Ahmadi Fahandar; Eyke H\u00fcllermeier; In\u00e9s Couso", "abstract": "We consider the problem of statistical inference for ranking data, specifically rank aggregation, under the assumption that samples are incomplete in the sense of not comprising all choice alternatives. In contrast to most existing methods, we explicitly model the process of turning a full ranking into an incomplete one, which we call the coarsening process. To this end, we propose the concept of rank-dependent coarsening, which assumes that incomplete rankings are produced by projecting a full ranking to a random subset of ranks. For a concrete instantiation of our model, in which full rankings are drawn from a Plackett-Luce distribution and observations take the form of pairwise preferences, we study the performance of various rank aggregation methods. In addition to predictive accuracy in the finite sample setting, we address the theoretical question of consistency, by which we mean the ability to recover a target ranking when the sample size goes to infinity, despite a potential bias in the observations caused by the (unknown) coarsening.", "bibtex": "@InProceedings{pmlr-v70-fahandar17a,\n title = \t {Statistical Inference for Incomplete Ranking Data: The Case of Rank-Dependent Coarsening},\n author = {Mohsen Ahmadi Fahandar and Eyke H{\\\"u}llermeier and In{\\'e}s Couso},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1078--1087},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/fahandar17a/fahandar17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/fahandar17a.html},\n abstract = \t {We consider the problem of statistical inference for ranking data, specifically rank aggregation, under the assumption that samples are incomplete in the sense of not comprising all choice alternatives. In contrast to most existing methods, we explicitly model the process of turning a full ranking into an incomplete one, which we call the coarsening process. 
To this end, we propose the concept of rank-dependent coarsening, which assumes that incomplete rankings are produced by projecting a full ranking to a random subset of ranks. For a concrete instantiation of our model, in which full rankings are drawn from a Plackett-Luce distribution and observations take the form of pairwise preferences, we study the performance of various rank aggregation methods. In addition to predictive accuracy in the finite sample setting, we address the theoretical question of consistency, by which we mean the ability to recover a target ranking when the sample size goes to infinity, despite a potential bias in the observations caused by the (unknown) coarsening.}\n}", "pdf": "http://proceedings.mlr.press/v70/fahandar17a/fahandar17a.pdf", "supp": "", "pdf_size": 269795, "gs_citation": 23, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1734550436286758078&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Paderborn University, Germany; Paderborn University, Germany; University of Oviedo, Spain", "aff_domain": "upb.de;upb.de;uniovi.es", "email": "upb.de;upb.de;uniovi.es", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/fahandar17a.html", "aff_unique_index": "0;0;1", "aff_unique_norm": "Paderborn University;University of Oviedo", "aff_unique_dep": ";", "aff_unique_url": "https://www.uni-paderborn.de;https://www.uniovi.es", "aff_unique_abbr": "UPB;UniOvi", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;1", "aff_country_unique": "Germany;Spain" }, { "title": "StingyCD: Safely Avoiding Wasteful Updates in Coordinate Descent", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/764", "id": "764", "author_site": "Tyler Johnson, Carlos Guestrin", "author": "Tyler B. Johnson; Carlos Guestrin", "abstract": "Coordinate descent (CD) is a scalable and simple algorithm for solving many optimization problems in machine learning. Despite this fact, CD can also be very computationally wasteful. Due to sparsity in sparse regression problems, for example, the majority of CD updates often result in no progress toward the solution. To address this inefficiency, we propose a modified CD algorithm named \u201cStingyCD.\u201d By skipping over many updates that are guaranteed to not decrease the objective value, StingyCD significantly reduces convergence times. Since StingyCD only skips updates with this guarantee, however, StingyCD does not fully exploit the problem\u2019s sparsity. For this reason, we also propose StingyCD+, an algorithm that achieves further speed-ups by skipping updates more aggressively. Since StingyCD and StingyCD+ rely on simple modifications to CD, it is also straightforward to use these algorithms with other approaches to scaling optimization. In empirical comparisons, StingyCD and StingyCD+ improve convergence times considerably for several L1-regularized optimization problems.", "bibtex": "@InProceedings{pmlr-v70-johnson17a,\n title = \t {{S}tingy{CD}: Safely Avoiding Wasteful Updates in Coordinate Descent},\n author = {Tyler B. 
Johnson and Carlos Guestrin},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1752--1760},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/johnson17a/johnson17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/johnson17a.html},\n abstract = \t {Coordinate descent (CD) is a scalable and simple algorithm for solving many optimization problems in machine learning. Despite this fact, CD can also be very computationally wasteful. Due to sparsity in sparse regression problems, for example, the majority of CD updates often result in no progress toward the solution. To address this inefficiency, we propose a modified CD algorithm named \u201cStingyCD.\u201d By skipping over many updates that are guaranteed to not decrease the objective value, StingyCD significantly reduces convergence times. Since StingyCD only skips updates with this guarantee, however, StingyCD does not fully exploit the problem\u2019s sparsity. For this reason, we also propose StingyCD+, an algorithm that achieves further speed-ups by skipping updates more aggressively. Since StingyCD and StingyCD+ rely on simple modifications to CD, it is also straightforward to use these algorithms with other approaches to scaling optimization. In empirical comparisons, StingyCD and StingyCD+ improve convergence times considerably for several L1-regularized optimization problems.}\n}", "pdf": "http://proceedings.mlr.press/v70/johnson17a/johnson17a.pdf", "supp": "", "pdf_size": 1384271, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3120368467500114533&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "University of Washington; University of Washington", "aff_domain": "washington.edu;cs.washington.edu", "email": "washington.edu;cs.washington.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/johnson17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Washington", "aff_unique_dep": "", "aff_unique_url": "https://www.washington.edu", "aff_unique_abbr": "UW", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Stochastic Adaptive Quasi-Newton Methods for Minimizing Expected Values", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/856", "id": "856", "author_site": "Chaoxu Zhou, Wenbo Gao, Donald Goldfarb", "author": "Chaoxu Zhou; Wenbo Gao; Donald Goldfarb", "abstract": "We propose a novel class of stochastic, adaptive methods for minimizing self-concordant functions which can be expressed as an expected value. These methods generate an estimate of the true objective function by taking the empirical mean over a sample drawn at each step, making the problem tractable. The use of adaptive step sizes eliminates the need for the user to supply a step size. Methods in this class include extensions of gradient descent (GD) and BFGS. We show that, given a suitable amount of sampling, the stochastic adaptive GD attains linear convergence in expectation, and with further sampling, the stochastic adaptive BFGS attains R-superlinear convergence. 
We present experiments showing that these methods compare favorably to SGD.", "bibtex": "@InProceedings{pmlr-v70-zhou17a,\n title = \t {Stochastic Adaptive Quasi-{N}ewton Methods for Minimizing Expected Values},\n author = {Chaoxu Zhou and Wenbo Gao and Donald Goldfarb},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {4150--4159},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zhou17a/zhou17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/zhou17a.html},\n abstract = \t {We propose a novel class of stochastic, adaptive methods for minimizing self-concordant functions which can be expressed as an expected value. These methods generate an estimate of the true objective function by taking the empirical mean over a sample drawn at each step, making the problem tractable. The use of adaptive step sizes eliminates the need for the user to supply a step size. Methods in this class include extensions of gradient descent (GD) and BFGS. We show that, given a suitable amount of sampling, the stochastic adaptive GD attains linear convergence in expectation, and with further sampling, the stochastic adaptive BFGS attains R-superlinear convergence. We present experiments showing that these methods compare favorably to SGD.}\n}", "pdf": "http://proceedings.mlr.press/v70/zhou17a/zhou17a.pdf", "supp": "", "pdf_size": 514576, "gs_citation": 31, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11215624795967555275&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Dept. of Industrial Engineering and Operations Research, Columbia University; Dept. of Industrial Engineering and Operations Research, Columbia University; Dept. of Industrial Engineering and Operations Research, Columbia University", "aff_domain": "columbia.edu; ; ", "email": "columbia.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/zhou17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Columbia University", "aff_unique_dep": "Dept. of Industrial Engineering and Operations Research", "aff_unique_url": "https://www.columbia.edu", "aff_unique_abbr": "Columbia", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Stochastic Bouncy Particle Sampler", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/890", "id": "890", "author_site": "Ari Pakman, Dar Gilboa, David Carlson, Liam Paninski", "author": "Ari Pakman; Dar Gilboa; David Carlson; Liam Paninski", "abstract": "We introduce a stochastic version of the non-reversible, rejection-free Bouncy Particle Sampler (BPS), a Markov process whose sample trajectories are piecewise linear, to efficiently sample Bayesian posteriors in big datasets. We prove that in the BPS no bias is introduced by noisy evaluations of the log-likelihood gradient. On the other hand, we argue that efficiency considerations favor a small, controllable bias, in exchange for faster mixing. We introduce a simple method that controls this trade-off. 
We illustrate these ideas in several examples which outperform previous approaches.", "bibtex": "@InProceedings{pmlr-v70-pakman17a,\n title = \t {Stochastic Bouncy Particle Sampler},\n author = {Ari Pakman and Dar Gilboa and David Carlson and Liam Paninski},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2741--2750},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/pakman17a/pakman17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/pakman17a.html},\n abstract = \t {We introduce a stochastic version of the non-reversible, rejection-free Bouncy Particle Sampler (BPS), a Markov process whose sample trajectories are piecewise linear, to efficiently sample Bayesian posteriors in big datasets. We prove that in the BPS no bias is introduced by noisy evaluations of the log-likelihood gradient. On the other hand, we argue that efficiency considerations favor a small, controllable bias, in exchange for faster mixing. We introduce a simple method that controls this trade-off. We illustrate these ideas in several examples which outperform previous approaches.}\n}", "pdf": "http://proceedings.mlr.press/v70/pakman17a/pakman17a.pdf", "supp": "", "pdf_size": 1225708, "gs_citation": 39, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1848881478013720938&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Statistics Department and Grossman Center for the Statistics of Mind, Columbia University, New York, NY 10027, USA; Statistics Department and Grossman Center for the Statistics of Mind, Columbia University, New York, NY 10027, USA; Duke University, Durham, NC 27708, USA; Statistics Department and Grossman Center for the Statistics of Mind, Columbia University, New York, NY 10027, USA", "aff_domain": "gmail.com; ; ; ", "email": "gmail.com; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/pakman17a.html", "aff_unique_index": "0;0;1;0", "aff_unique_norm": "Columbia University;Duke University", "aff_unique_dep": "Statistics Department;", "aff_unique_url": "https://www.columbia.edu;https://www.duke.edu", "aff_unique_abbr": "Columbia;Duke", "aff_campus_unique_index": "0;0;1;0", "aff_campus_unique": "New York;Durham", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Stochastic Convex Optimization: Faster Local Growth Implies Faster Global Convergence", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/505", "id": "505", "author_site": "Yi Xu, Qihang Lin, Tianbao Yang", "author": "Yi Xu; Qihang Lin; Tianbao Yang", "abstract": "In this paper, a new theory is developed for first-order stochastic convex optimization, showing that the global convergence rate is sufficiently quantified by a local growth rate of the objective function in a neighborhood of the optimal solutions. 
In particular, if the objective function $F(\\mathbf{w})$ in the $\\epsilon$-sublevel set grows as fast as $\\|\\mathbf{w} - \\mathbf{w}_*\\|_2^{1/\\theta}$, where $\\mathbf{w}_*$ represents the closest optimal solution to $\\mathbf{w}$ and $\\theta\\in(0,1]$ quantifies the local growth rate, the iteration complexity of first-order stochastic optimization for achieving an $\\epsilon$-optimal solution can be $\\widetilde O(1/\\epsilon^{2(1-\\theta)})$, which is", "bibtex": "@InProceedings{pmlr-v70-xu17a,\n title = \t {Stochastic Convex Optimization: Faster Local Growth Implies Faster Global Convergence},\n author = {Yi Xu and Qihang Lin and Tianbao Yang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3821--3830},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/xu17a/xu17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/xu17a.html},\n abstract = \t {In this paper, a new theory is developed for first-order stochastic convex optimization, showing that the global convergence rate is sufficiently quantified by a local growth rate of the objective function in a neighborhood of the optimal solutions. In particular, if the objective function $F(\\mathbf{w})$ in the $\\epsilon$-sublevel set grows as fast as $\\|\\mathbf{w} - \\mathbf{w}_*\\|_2^{1/\\theta}$, where $\\mathbf{w}_*$ represents the closest optimal solution to $\\mathbf{w}$ and $\\theta\\in(0,1]$ quantifies the local growth rate, the iteration complexity of first-order stochastic optimization for achieving an $\\epsilon$-optimal solution can be $\\widetilde O(1/\\epsilon^{2(1-\\theta)})$, which is", "pdf": "http://proceedings.mlr.press/v70/xu17a/xu17a.pdf", "supp": "", "pdf_size": 429497, "gs_citation": 52, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11252424201298571382&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Department of Computer Science, The University of Iowa; Department of Management Sciences, The University of Iowa; Department of Computer Science, The University of Iowa", "aff_domain": "uiowa.edu;uiowa.edu;uiowa.edu", "email": "uiowa.edu;uiowa.edu;uiowa.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/xu17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Iowa", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.uiowa.edu", "aff_unique_abbr": "UIowa", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Stochastic DCA for the Large-sum of Non-convex Functions Problem and its Application to Group Variable Selection in Classification", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/886", "id": "886", "author_site": "Hoai An Le Thi, Hoai Minh Le, Duy Nhat Phan, Bach Tran", "author": "Hoai An Le Thi; Hoai Minh Le; Duy Nhat Phan; Bach Tran", "abstract": "In this paper, we present a stochastic version of DCA (Difference of Convex functions Algorithm) to solve a class of optimization problems whose objective function is a large sum of non-convex functions and a regularization term. We consider the $\\ell_{2,0}$ regularization to deal with the group variables selection. 
By exploiting the special structure of the problem, we propose an efficient DC decomposition for which the corresponding stochastic DCA scheme is very inexpensive: it only requires the projection of points onto balls that is explicitly computed. As an application, we applied our algorithm for the group variables selection in multiclass logistic regression. Numerical experiments on several benchmark datasets and synthetic datasets illustrate the efficiency of our algorithm and its superiority over well-known methods, with respect to classification accuracy, sparsity of solution as well as running time.", "bibtex": "@InProceedings{pmlr-v70-thi17a,\n title = \t {Stochastic {DCA} for the Large-sum of Non-convex Functions Problem and its Application to Group Variable Selection in Classification},\n author = {Hoai An Le Thi and Hoai Minh Le and Duy Nhat Phan and Bach Tran},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3394--3403},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/thi17a/thi17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/thi17a.html},\n abstract = \t {In this paper, we present a stochastic version of DCA (Difference of Convex functions Algorithm) to solve a class of optimization problems whose objective function is a large sum of non-convex functions and a regularization term. We consider the $\\ell_{2,0}$ regularization to deal with the group variables selection. By exploiting the special structure of the problem, we propose an efficient DC decomposition for which the corresponding stochastic DCA scheme is very inexpensive: it only requires the projection of points onto balls that is explicitly computed. As an application, we applied our algorithm for the group variables selection in multiclass logistic regression. 
Numerical experiments on several benchmark datasets and synthetic datasets illustrate the efficiency of our algorithm and its superiority over well-known methods, with respect to classification accuracy, sparsity of solution as well as running time.}\n}", "pdf": "http://proceedings.mlr.press/v70/thi17a/thi17a.pdf", "supp": "", "pdf_size": 281311, "gs_citation": 34, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13935756435909667571&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "Laboratory of Theoretical and Applied Computer Science, University of Lorraine, France; Laboratory of Theoretical and Applied Computer Science, University of Lorraine, France; Laboratory of Theoretical and Applied Computer Science, University of Lorraine, France; Laboratory of Theoretical and Applied Computer Science, University of Lorraine, France", "aff_domain": "univ-lorraine.fr; ; ; ", "email": "univ-lorraine.fr; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/thi17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "University of Lorraine", "aff_unique_dep": "Laboratory of Theoretical and Applied Computer Science", "aff_unique_url": "https://www.univ-lorraine.fr", "aff_unique_abbr": "", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "France" }, { "title": "Stochastic Generative Hashing", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/766", "id": "766", "author_site": "Bo Dai, Ruiqi Guo, Sanjiv Kumar, Niao He, Le Song", "author": "Bo Dai; Ruiqi Guo; Sanjiv Kumar; Niao He; Le Song", "abstract": "Learning-based binary hashing has become a powerful paradigm for fast search and retrieval in massive databases. However, due to the requirement of discrete outputs for the hash functions, learning such functions is known to be very challenging. In addition, the objective functions adopted by existing hashing techniques are mostly chosen heuristically. In this paper, we propose a novel generative approach to learn hash functions through Minimum Description Length principle such that the learned hash codes maximally compress the dataset and can also be used to regenerate the inputs. We also develop an efficient learning algorithm based on the stochastic distributional gradient, which avoids the notorious difficulty caused by binary output constraints, to jointly optimize the parameters of the hash function and the associated generative model. Extensive experiments on a variety of large-scale datasets show that the proposed method achieves better retrieval results than the existing state-of-the-art methods.", "bibtex": "@InProceedings{pmlr-v70-dai17a,\n title = \t {Stochastic Generative Hashing},\n author = {Bo Dai and Ruiqi Guo and Sanjiv Kumar and Niao He and Le Song},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {913--922},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/dai17a/dai17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/dai17a.html},\n abstract = \t {Learning-based binary hashing has become a powerful paradigm for fast search and retrieval in massive databases. 
However, due to the requirement of discrete outputs for the hash functions, learning such functions is known to be very challenging. In addition, the objective functions adopted by existing hashing techniques are mostly chosen heuristically. In this paper, we propose a novel generative approach to learn hash functions through Minimum Description Length principle such that the learned hash codes maximally compress the dataset and can also be used to regenerate the inputs. We also develop an efficient learning algorithm based on the stochastic distributional gradient, which avoids the notorious difficulty caused by binary output constraints, to jointly optimize the parameters of the hash function and the associated generative model. Extensive experiments on a variety of large-scale datasets show that the proposed method achieves better retrieval results than the existing state-of-the-art methods.}\n}", "pdf": "http://proceedings.mlr.press/v70/dai17a/dai17a.pdf", "supp": "", "pdf_size": 2031960, "gs_citation": 142, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=863160425188829762&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "Georgia Institute of Technology; Google Research, NYC; Google Research, NYC; University of Illinois at Urbana-Champaign; Georgia Institute of Technology", "aff_domain": "gatech.edu; ; ; ; ", "email": "gatech.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/dai17a.html", "aff_unique_index": "0;1;1;2;0", "aff_unique_norm": "Georgia Institute of Technology;Google;University of Illinois Urbana-Champaign", "aff_unique_dep": ";Google Research;", "aff_unique_url": "https://www.gatech.edu;https://research.google;https://illinois.edu", "aff_unique_abbr": "Georgia Tech;Google Research;UIUC", "aff_campus_unique_index": "1;1;2", "aff_campus_unique": ";New York City;Urbana-Champaign", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Stochastic Gradient MCMC Methods for Hidden Markov Models", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/829", "id": "829", "author_site": "Yi-An Ma, Nicholas J Foti, Emily Fox", "author": "Yi-An Ma; Nicholas J. Foti; Emily B. Fox", "abstract": "Stochastic gradient MCMC (SG-MCMC) algorithms have proven useful in scaling Bayesian inference to large datasets under an assumption of i.i.d data. We instead develop an SG-MCMC algorithm to learn the parameters of hidden Markov models (HMMs) for time-dependent data. There are two challenges to applying SG-MCMC in this setting: The latent discrete states, and needing to break dependencies when considering minibatches. We consider a marginal likelihood representation of the HMM and propose an algorithm that harnesses the inherent memory decay of the process. We demonstrate the effectiveness of our algorithm on synthetic experiments and an ion channel recording data, with runtimes significantly outperforming batch MCMC.", "bibtex": "@InProceedings{pmlr-v70-ma17a,\n title = \t {Stochastic Gradient {MCMC} Methods for Hidden {M}arkov Models},\n author = {Yi-An Ma and Nicholas J. Foti and Emily B. 
Fox},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2265--2274},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/ma17a/ma17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/ma17a.html},\n abstract = \t {Stochastic gradient MCMC (SG-MCMC) algorithms have proven useful in scaling Bayesian inference to large datasets under an assumption of i.i.d. data. We instead develop an SG-MCMC algorithm to learn the parameters of hidden Markov models (HMMs) for time-dependent data. There are two challenges to applying SG-MCMC in this setting: the latent discrete states, and the need to break dependencies when considering minibatches. We consider a marginal likelihood representation of the HMM and propose an algorithm that harnesses the inherent memory decay of the process. We demonstrate the effectiveness of our algorithm on synthetic experiments and an ion channel recording dataset, with runtimes significantly outperforming batch MCMC.}\n}", "pdf": "http://proceedings.mlr.press/v70/ma17a/ma17a.pdf", "supp": "", "pdf_size": 2488540, "gs_citation": 35, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2312930169334280277&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "University of Washington, Seattle, WA, USA; University of Washington, Seattle, WA, USA; University of Washington, Seattle, WA, USA", "aff_domain": "uw.edu; ; ", "email": "uw.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/ma17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Washington", "aff_unique_dep": "", "aff_unique_url": "https://www.washington.edu", "aff_unique_abbr": "UW", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Seattle", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Stochastic Gradient Monomial Gamma Sampler", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/768", "id": "768", "author_site": "Yizhe Zhang, Changyou Chen, Zhe Gan, Ricardo Henao, Lawrence Carin", "author": "Yizhe Zhang; Changyou Chen; Zhe Gan; Ricardo Henao; Lawrence Carin", "abstract": "Scaling Markov Chain Monte Carlo (MCMC) to estimate posterior distributions from large datasets has been made possible as a result of advances in stochastic gradient techniques. Despite their success, mixing performance of existing methods when sampling from multimodal distributions can be less efficient with insufficient Monte Carlo samples; this is evidenced by slow convergence and insufficient exploration of posterior distributions. We propose a generalized framework to improve the sampling efficiency of stochastic gradient MCMC, by leveraging a generalized kinetics that delivers superior stationary mixing, especially in multimodal distributions, and propose several techniques to overcome the practical issues. 
We show that the proposed approach is better at exploring a complicated multimodal posterior distribution, and demonstrate improvements over other stochastic gradient MCMC methods on various applications.", "bibtex": "@InProceedings{pmlr-v70-zhang17a,\n title = \t {Stochastic Gradient Monomial Gamma Sampler},\n author = {Yizhe Zhang and Changyou Chen and Zhe Gan and Ricardo Henao and Lawrence Carin},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3996--4005},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zhang17a/zhang17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/zhang17a.html},\n abstract = \t {Scaling Markov Chain Monte Carlo (MCMC) to estimate posterior distributions from large datasets has been made possible as a result of advances in stochastic gradient techniques. Despite their success, mixing performance of existing methods when sampling from multimodal distributions can be less efficient with insufficient Monte Carlo samples; this is evidenced by slow convergence and insufficient exploration of posterior distributions. We propose a generalized framework to improve the sampling efficiency of stochastic gradient MCMC, by leveraging a generalized kinetics that delivers superior stationary mixing, especially in multimodal distributions, and propose several techniques to overcome the practical issues. We show that the proposed approach is better at exploring a complicated multimodal posterior distribution, and demonstrate improvements over other stochastic gradient MCMC methods on various applications.}\n}", "pdf": "http://proceedings.mlr.press/v70/zhang17a/zhang17a.pdf", "supp": "", "pdf_size": 1780936, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11643666746320389258&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Duke University; Duke University; Duke University; Duke University; Duke University", "aff_domain": "duke.edu; ; ; ; ", "email": "duke.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/zhang17a.html", "aff_unique_index": "0;0;0;0;0", "aff_unique_norm": "Duke University", "aff_unique_dep": "", "aff_unique_url": "https://www.duke.edu", "aff_unique_abbr": "Duke", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Stochastic Modified Equations and Adaptive Stochastic Gradient Algorithms", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/530", "id": "530", "author_site": "Qianxiao Li, Cheng Tai, Weinan E", "author": "Qianxiao Li; Cheng Tai; Weinan E", "abstract": "We develop the method of stochastic modified equations (SME), in which stochastic gradient algorithms are approximated in the weak sense by continuous-time stochastic differential equations. We exploit the continuous formulation together with optimal control theory to derive novel adaptive hyper-parameter adjustment policies. Our algorithms have competitive performance with the added benefit of being robust to varying models and datasets. 
This provides a general methodology for the analysis and design of stochastic gradient algorithms.", "bibtex": "@InProceedings{pmlr-v70-li17f,\n title = \t {Stochastic Modified Equations and Adaptive Stochastic Gradient Algorithms},\n author = {Qianxiao Li and Cheng Tai and Weinan E},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2101--2110},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/li17f/li17f.pdf},\n url = \t {https://proceedings.mlr.press/v70/li17f.html},\n abstract = \t {We develop the method of stochastic modified equations (SME), in which stochastic gradient algorithms are approximated in the weak sense by continuous-time stochastic differential equations. We exploit the continuous formulation together with optimal control theory to derive novel adaptive hyper-parameter adjustment policies. Our algorithms have competitive performance with the added benefit of being robust to varying models and datasets. This provides a general methodology for the analysis and design of stochastic gradient algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v70/li17f/li17f.pdf", "supp": "", "pdf_size": 596850, "gs_citation": 350, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5449311691062785243&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Institute of High Performance Computing, Singapore; Peking University, Beijing, China+Beijing Institute of Big Data Research, Beijing, China; Peking University, Beijing, China+Beijing Institute of Big Data Research, Beijing, China+Princeton University, Princeton, NJ, USA", "aff_domain": "ihpc.a-star.edu.sg; ; ", "email": "ihpc.a-star.edu.sg; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/li17f.html", "aff_unique_index": "0;1+2;1+2+3", "aff_unique_norm": "Institute of High Performance Computing;Peking University;Beijing Institute of Big Data Research;Princeton University", "aff_unique_dep": ";;;", "aff_unique_url": "https://www.ihpc.a-star.edu.sg;http://www.pku.edu.cn;;https://www.princeton.edu", "aff_unique_abbr": "IHPC;Peking U;;Princeton", "aff_campus_unique_index": "1+1;1+1+2", "aff_campus_unique": ";Beijing;Princeton", "aff_country_unique_index": "0;1+1;1+1+2", "aff_country_unique": "Singapore;China;United States" }, { "title": "Stochastic Variance Reduction Methods for Policy Evaluation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/648", "id": "648", "author_site": "Simon Du, Jianshu Chen, Lihong Li, Lin Xiao, Dengyong Zhou", "author": "Simon S. Du; Jianshu Chen; Lihong Li; Lin Xiao; Dengyong Zhou", "abstract": "Policy evaluation is concerned with estimating the value function that predicts long-term values of states under a given policy. It is a crucial step in many reinforcement-learning algorithms. In this paper, we focus on policy evaluation with linear function approximation over a fixed dataset. We first transform the empirical policy evaluation problem into a (quadratic) convex-concave saddle-point problem, and then present a primal-dual batch gradient method, as well as two stochastic variance reduction methods for solving the problem. These algorithms scale linearly in both sample size and feature dimension. 
Moreover, they achieve linear convergence even when the saddle-point problem has only strong concavity in the dual variables but no strong convexity in the primal variables. Numerical experiments on benchmark problems demonstrate the effectiveness of our methods.", "bibtex": "@InProceedings{pmlr-v70-du17a,\n title = \t {Stochastic Variance Reduction Methods for Policy Evaluation},\n author = {Simon S. Du and Jianshu Chen and Lihong Li and Lin Xiao and Dengyong Zhou},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1049--1058},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/du17a/du17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/du17a.html},\n abstract = \t {Policy evaluation is concerned with estimating the value function that predicts long-term values of states under a given policy. It is a crucial step in many reinforcement-learning algorithms. In this paper, we focus on policy evaluation with linear function approximation over a fixed dataset. We first transform the empirical policy evaluation problem into a (quadratic) convex-concave saddle-point problem, and then present a primal-dual batch gradient method, as well as two stochastic variance reduction methods for solving the problem. These algorithms scale linearly in both sample size and feature dimension. Moreover, they achieve linear convergence even when the saddle-point problem has only strong concavity in the dual variables but no strong convexity in the primal variables. Numerical experiments on benchmark problems demonstrate the effectiveness of our methods.}\n}", "pdf": "http://proceedings.mlr.press/v70/du17a/du17a.pdf", "supp": "", "pdf_size": 1410204, "gs_citation": 218, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13674130294144194350&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 6, "aff": "Machine Learning Department, Carnegie Mellon University, Pittsburgh, Pennsylvania 15213, USA; Microsoft Research, Redmond, Washington 98052, USA; Microsoft Research, Redmond, Washington 98052, USA; Microsoft Research, Redmond, Washington 98052, USA; Microsoft Research, Redmond, Washington 98052, USA", "aff_domain": "cs.cmu.edu;microsoft.com;microsoft.com;microsoft.com;microsoft.com", "email": "cs.cmu.edu;microsoft.com;microsoft.com;microsoft.com;microsoft.com", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/du17a.html", "aff_unique_index": "0;1;1;1;1", "aff_unique_norm": "Carnegie Mellon University;Microsoft", "aff_unique_dep": "Machine Learning Department;Microsoft Research", "aff_unique_url": "https://www.cmu.edu;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "CMU;MSR", "aff_campus_unique_index": "0;1;1;1;1", "aff_campus_unique": "Pittsburgh;Redmond", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Strong NP-Hardness for Sparse Optimization with Concave Penalty Functions", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/726", "id": "726", "author_site": "Yichen Chen, Dongdong Ge, Mengdi Wang, Zizhuo Wang, Yinyu Ye, Hao Yin", "author": "Yichen Chen; Dongdong Ge; Mengdi Wang; Zizhuo Wang; Yinyu Ye; Hao Yin", "abstract": "Consider the regularized sparse minimization problem, which involves empirical sums of loss functions for 
$n$ data points (each of dimension $d$) and a nonconvex sparsity penalty. We prove that finding an $\\mathcal{O}(n^{c_1}d^{c_2})$-optimal solution to the regularized sparse optimization problem is strongly NP-hard for any $c_1, c_2\\in [0,1)$ such that $c_1+c_2<1$. The result applies to a broad class of loss functions and sparse penalty functions. It suggests that one cannot even approximately solve the sparse optimization problem in polynomial time, unless P $=$ NP.", "bibtex": "@InProceedings{pmlr-v70-chen17d,\n title = \t {Strong {NP}-Hardness for Sparse Optimization with Concave Penalty Functions},\n author = {Yichen Chen and Dongdong Ge and Mengdi Wang and Zizhuo Wang and Yinyu Ye and Hao Yin},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {740--747},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/chen17d/chen17d.pdf},\n url = \t {https://proceedings.mlr.press/v70/chen17d.html},\n abstract = \t {Consider the regularized sparse minimization problem, which involves empirical sums of loss functions for $n$ data points (each of dimension $d$) and a nonconvex sparsity penalty. We prove that finding an $\\mathcal{O}(n^{c_1}d^{c_2})$-optimal solution to the regularized sparse optimization problem is strongly NP-hard for any $c_1, c_2\\in [0,1)$ such that $c_1+c_2<1$. The result applies to a broad class of loss functions and sparse penalty functions. It suggests that one cannot even approximately solve the sparse optimization problem in polynomial time, unless P $=$ NP.}\n}", "pdf": "http://proceedings.mlr.press/v70/chen17d/chen17d.pdf", "supp": "", "pdf_size": 243473, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15721361869410413155&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Princeton University, NJ, USA; Shanghai University of Finance and Economics, Shanghai, China; University of Minnesota, MN, USA; Stanford University, CA, USA; Stanford University, CA, USA; Stanford University, CA, USA", "aff_domain": "princeton.edu; ; ; ; ; ", "email": "princeton.edu; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/chen17d.html", "aff_unique_index": "0;1;2;3;3;3", "aff_unique_norm": "Princeton University;Shanghai University of Finance and Economics;University of Minnesota;Stanford University", "aff_unique_dep": ";;;", "aff_unique_url": "https://www.princeton.edu;http://www.sufe.edu.cn;https://www.minnesota.edu;https://www.stanford.edu", "aff_unique_abbr": "Princeton;SUFE;UMN;Stanford", "aff_campus_unique_index": "0;1;2;3;3;3", "aff_campus_unique": "Princeton;Shanghai;MN;California", "aff_country_unique_index": "0;1;0;0;0;0", "aff_country_unique": "United States;China" }, { "title": "Strongly-Typed Agents are Guaranteed to Interact Safely", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/599", "id": "599", "author": "David Balduzzi", "abstract": "As artificial agents proliferate, it is becoming increasingly important to ensure that their interactions with one another are well-behaved. In this paper, we formalize a common-sense notion of when algorithms are well-behaved: an algorithm is safe if it does no harm. 
Motivated by recent progress in deep learning, we focus on the specific case where agents update their actions according to gradient descent. The paper shows that gradient descent converges to a Nash equilibrium in safe games. The main contribution is to define strongly-typed agents and show they are guaranteed to interact safely, thereby providing sufficient conditions to guarantee safe interactions. A series of examples show that strong-typing generalizes certain key features of convexity, is closely related to blind source separation, and introduces a new perspective on classical multilinear games based on tensor decomposition.", "bibtex": "@InProceedings{pmlr-v70-balduzzi17a,\n title = \t {Strongly-Typed Agents are Guaranteed to Interact Safely},\n author = {David Balduzzi},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {332--341},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/balduzzi17a/balduzzi17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/balduzzi17a.html},\n abstract = \t {As artificial agents proliferate, it is becoming increasingly important to ensure that their interactions with one another are well-behaved. In this paper, we formalize a common-sense notion of when algorithms are well-behaved: an algorithm is safe if it does no harm. Motivated by recent progress in deep learning, we focus on the specific case where agents update their actions according to gradient descent. The paper shows that gradient descent converges to a Nash equilibrium in safe games. The main contribution is to define strongly-typed agents and show they are guaranteed to interact safely, thereby providing sufficient conditions to guarantee safe interactions. A series of examples show that strong-typing generalizes certain key features of convexity, is closely related to blind source separation, and introduces a new perspective on classical multilinear games based on tensor decomposition.}\n}", "pdf": "http://proceedings.mlr.press/v70/balduzzi17a/balduzzi17a.pdf", "supp": "", "pdf_size": 504667, "gs_citation": 3, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=218202175408560705&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Victoria University of Wellington, New Zealand", "aff_domain": "gmail.com", "email": "gmail.com", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v70/balduzzi17a.html", "aff_unique_index": "0", "aff_unique_norm": "Victoria University of Wellington", "aff_unique_dep": "", "aff_unique_url": "https://www.victoria.ac.nz", "aff_unique_abbr": "VUW", "aff_country_unique_index": "0", "aff_country_unique": "New Zealand" }, { "title": "Sub-sampled Cubic Regularization for Non-convex Optimization", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/563", "id": "563", "author_site": "Jonas Kohler, Aurelien Lucchi", "author": "Jonas Moritz Kohler; Aurelien Lucchi", "abstract": "We consider the minimization of non-convex functions that typically arise in machine learning. Specifically, we focus our attention on a variant of trust region methods known as cubic regularization. 
This approach is particularly attractive because it escapes strict saddle points and it provides stronger convergence guarantees than first- and second-order as well as classical trust region methods. However, it suffers from a high computational complexity that makes it impractical for large-scale learning. Here, we propose a novel method that uses sub-sampling to lower this computational cost. By the use of concentration inequalities we provide a sampling scheme that gives sufficiently accurate gradient and Hessian approximations to retain the strong global and local convergence guarantees of cubically regularized methods. To the best of our knowledge this is the first work that gives global convergence guarantees for a sub-sampled variant of cubic regularization on non-convex functions. Furthermore, we provide experimental results supporting our theory.", "bibtex": "@InProceedings{pmlr-v70-kohler17a,\n title = \t {Sub-sampled Cubic Regularization for Non-convex Optimization},\n author = {Jonas Moritz Kohler and Aurelien Lucchi},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1895--1904},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/kohler17a/kohler17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/kohler17a.html},\n abstract = \t {We consider the minimization of non-convex functions that typically arise in machine learning. Specifically, we focus our attention on a variant of trust region methods known as cubic regularization. This approach is particularly attractive because it escapes strict saddle points and it provides stronger convergence guarantees than first- and second-order as well as classical trust region methods. However, it suffers from a high computational complexity that makes it impractical for large-scale learning. Here, we propose a novel method that uses sub-sampling to lower this computational cost. By the use of concentration inequalities we provide a sampling scheme that gives sufficiently accurate gradient and Hessian approximations to retain the strong global and local convergence guarantees of cubically regularized methods. To the best of our knowledge this is the first work that gives global convergence guarantees for a sub-sampled variant of cubic regularization on non-convex functions. 
Furthermore, we provide experimental results supporting our theory.}\n}", "pdf": "http://proceedings.mlr.press/v70/kohler17a/kohler17a.pdf", "supp": "", "pdf_size": 633606, "gs_citation": 218, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6274934951740552221&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science, ETH Zurich, Switzerland; Department of Computer Science, ETH Zurich, Switzerland", "aff_domain": "student.kit.edu;inf.ethz.ch", "email": "student.kit.edu;inf.ethz.ch", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/kohler17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETHZ", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Switzerland" }, { "title": "Tensor Balancing on Statistical Manifold", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/631", "id": "631", "author_site": "Mahito Sugiyama, Hiroyuki Nakahara, Koji Tsuda", "author": "Mahito Sugiyama; Hiroyuki Nakahara; Koji Tsuda", "abstract": "We solve tensor balancing, rescaling an Nth order nonnegative tensor by multiplying N tensors of order N - 1 so that every fiber sums to one. This generalizes a fundamental process of matrix balancing used to compare matrices in a wide range of applications from biology to economics. We present an efficient balancing algorithm with quadratic convergence using Newton\u2019s method and show in numerical experiments that the proposed algorithm is several orders of magnitude faster than existing ones. To theoretically prove the correctness of the algorithm, we model tensors as probability distributions in a statistical manifold and realize tensor balancing as projection onto a submanifold. The key to our algorithm is that the gradient of the manifold, used as a Jacobian matrix in Newton\u2019s method, can be analytically obtained using the M\u00f6bius inversion formula, the essential of combinatorial mathematics. Our model is not limited to tensor balancing, but has a wide applicability as it includes various statistical and machine learning models such as weighted DAGs and Boltzmann machines.", "bibtex": "@InProceedings{pmlr-v70-sugiyama17a,\n title = \t {Tensor Balancing on Statistical Manifold},\n author = {Mahito Sugiyama and Hiroyuki Nakahara and Koji Tsuda},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3270--3279},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/sugiyama17a/sugiyama17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/sugiyama17a.html},\n abstract = \t {We solve tensor balancing, rescaling an Nth order nonnegative tensor by multiplying N tensors of order N - 1 so that every fiber sums to one. This generalizes a fundamental process of matrix balancing used to compare matrices in a wide range of applications from biology to economics. We present an efficient balancing algorithm with quadratic convergence using Newton\u2019s method and show in numerical experiments that the proposed algorithm is several orders of magnitude faster than existing ones. 
To theoretically prove the correctness of the algorithm, we model tensors as probability distributions in a statistical manifold and realize tensor balancing as projection onto a submanifold. The key to our algorithm is that the gradient of the manifold, used as a Jacobian matrix in Newton\u2019s method, can be analytically obtained using the M\u00f6bius inversion formula, the essential of combinatorial mathematics. Our model is not limited to tensor balancing, but has a wide applicability as it includes various statistical and machine learning models such as weighted DAGs and Boltzmann machines.}\n}", "pdf": "http://proceedings.mlr.press/v70/sugiyama17a/sugiyama17a.pdf", "supp": "", "pdf_size": 177156, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4227888270727129551&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "National Institute of Informatics+JST PRESTO; RIKEN Brain Science Institute; Graduate School of Frontier Sciences, The University of Tokyo+RIKEN AIP+NIMS", "aff_domain": "nii.ac.jp; ; ", "email": "nii.ac.jp; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/sugiyama17a.html", "aff_unique_index": "0+1;2;3+2+4", "aff_unique_norm": "National Institute of Informatics;Japan Science and Technology Agency;RIKEN;University of Tokyo;National Institute for Materials Science", "aff_unique_dep": ";PRESTO;Brain Science Institute;Graduate School of Frontier Sciences;", "aff_unique_url": "https://www.nii.ac.jp/;https://www.jst.go.jp;https://briken.org;https://www.u-tokyo.ac.jp;https://www.nims.go.jp", "aff_unique_abbr": "NII;JST;RIKEN;UTokyo;NIMS", "aff_campus_unique_index": ";1", "aff_campus_unique": ";Tokyo", "aff_country_unique_index": "0+0;0;0+0+0", "aff_country_unique": "Japan" }, { "title": "Tensor Belief Propagation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/651", "id": "651", "author_site": "Andrew Wrigley, Wee Sun Lee, Nan Ye", "author": "Andrew Wrigley; Wee Sun Lee; Nan Ye", "abstract": "We propose a new approximate inference algorithm for graphical models, tensor belief propagation, based on approximating the messages passed in the junction tree algorithm. Our algorithm represents the potential functions of the graphical model and all messages on the junction tree compactly as mixtures of rank-1 tensors. Using this representation, we show how to perform the operations required for inference on the junction tree efficiently: marginalisation can be computed quickly due to the factored form of rank-1 tensors while multiplication can be approximated using sampling. Our analysis gives sufficient conditions for the algorithm to perform well, including for the case of high-treewidth graphs, for which exact inference is intractable. 
We compare our algorithm experimentally with several approximate inference algorithms and show that it performs well.", "bibtex": "@InProceedings{pmlr-v70-wrigley17a,\n title = \t {Tensor Belief Propagation},\n author = {Andrew Wrigley and Wee Sun Lee and Nan Ye},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3771--3779},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/wrigley17a/wrigley17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/wrigley17a.html},\n abstract = \t {We propose a new approximate inference algorithm for graphical models, tensor belief propagation, based on approximating the messages passed in the junction tree algorithm. Our algorithm represents the potential functions of the graphical model and all messages on the junction tree compactly as mixtures of rank-1 tensors. Using this representation, we show how to perform the operations required for inference on the junction tree efficiently: marginalisation can be computed quickly due to the factored form of rank-1 tensors while multiplication can be approximated using sampling. Our analysis gives sufficient conditions for the algorithm to perform well, including for the case of high-treewidth graphs, for which exact inference is intractable. We compare our algorithm experimentally with several approximate inference algorithms and show that it performs well.}\n}", "pdf": "http://proceedings.mlr.press/v70/wrigley17a/wrigley17a.pdf", "supp": "", "pdf_size": 398727, "gs_citation": 14, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14091009898879668952&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 9, "aff": "Australian National University, Canberra, Australia; National University of Singapore, Singapore; Queensland University of Technology, Brisbane, Australia", "aff_domain": "anu.edu.au;comp.nus.edu.sg;qut.edu.au", "email": "anu.edu.au;comp.nus.edu.sg;qut.edu.au", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/wrigley17a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Australian National University;National University of Singapore;Queensland University of Technology", "aff_unique_dep": ";;", "aff_unique_url": "https://www.anu.edu.au;https://www.nus.edu.sg;https://www.qut.edu.au", "aff_unique_abbr": "ANU;NUS;QUT", "aff_campus_unique_index": "0;2", "aff_campus_unique": "Canberra;;Brisbane", "aff_country_unique_index": "0;1;0", "aff_country_unique": "Australia;Singapore" }, { "title": "Tensor Decomposition via Simultaneous Power Iteration", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/560", "id": "560", "author_site": "Poan Wang, Chi-Jen Lu", "author": "Po-An Wang; Chi-Jen Lu", "abstract": "Tensor decomposition is an important problem with many applications across several disciplines, and a popular approach for this problem is the tensor power method. However, previous works with theoretical guarantee based on this approach can only find the top eigenvectors one after one, unlike the case for matrices. In this paper, we show how to find the eigenvectors simultaneously with the help of a new initialization procedure. 
This allows us to achieve a better running time in the batch setting, as well as a lower sample complexity in the streaming setting.", "bibtex": "@InProceedings{pmlr-v70-wang17i,\n title = \t {Tensor Decomposition via Simultaneous Power Iteration},\n author = {Po-An Wang and Chi-Jen Lu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3665--3673},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/wang17i/wang17i.pdf},\n url = \t {https://proceedings.mlr.press/v70/wang17i.html},\n abstract = \t {Tensor decomposition is an important problem with many applications across several disciplines, and a popular approach for this problem is the tensor power method. However, previous works with theoretical guarantee based on this approach can only find the top eigenvectors one after one, unlike the case for matrices. In this paper, we show how to find the eigenvectors simultaneously with the help of a new initialization procedure. This allows us to achieve a better running time in the batch setting, as well as a lower sample complexity in the streaming setting.}\n}", "pdf": "http://proceedings.mlr.press/v70/wang17i/wang17i.pdf", "supp": "", "pdf_size": 324302, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2852500943480701345&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Academia Sinica; Academia Sinica", "aff_domain": "iis.sinica.edu.tw; ", "email": "iis.sinica.edu.tw; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/wang17i.html", "aff_unique_index": "0;0", "aff_unique_norm": "Academia Sinica", "aff_unique_dep": "", "aff_unique_url": "https://www.sinica.edu.tw", "aff_unique_abbr": "Academia Sinica", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Taiwan", "aff_country_unique_index": "0;0", "aff_country_unique": "China" }, { "title": "Tensor Decomposition with Smoothness", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/556", "id": "556", "author_site": "Masaaki Imaizumi, Kohei Hayashi", "author": "Masaaki Imaizumi; Kohei Hayashi", "abstract": "Real data tensors are usually high dimensional but their intrinsic information is preserved in low-dimensional space, which motivates to use tensor decompositions such as Tucker decomposition. Often, real data tensors are not only low dimensional, but also smooth, meaning that the adjacent elements are similar or continuously changing, which typically appear as spatial or temporal data. To incorporate the smoothness property, we propose the smoothed Tucker decomposition (STD). STD leverages the smoothness by the sum of a few basis functions, which reduces the number of parameters. The objective function is formulated as a convex problem and, to solve that, an algorithm based on the alternating direction method of multipliers is derived. We theoretically show that, under the smoothness assumption, STD achieves a better error bound. 
The theoretical result and performances of STD are numerically verified.", "bibtex": "@InProceedings{pmlr-v70-imaizumi17a,\n title = \t {Tensor Decomposition with Smoothness},\n author = {Masaaki Imaizumi and Kohei Hayashi},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1597--1606},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/imaizumi17a/imaizumi17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/imaizumi17a.html},\n abstract = \t {Real data tensors are usually high dimensional but their intrinsic information is preserved in low-dimensional space, which motivates to use tensor decompositions such as Tucker decomposition. Often, real data tensors are not only low dimensional, but also smooth, meaning that the adjacent elements are similar or continuously changing, which typically appear as spatial or temporal data. To incorporate the smoothness property, we propose the smoothed Tucker decomposition (STD). STD leverages the smoothness by the sum of a few basis functions, which reduces the number of parameters. The objective function is formulated as a convex problem and, to solve that, an algorithm based on the alternating direction method of multipliers is derived. We theoretically show that, under the smoothness assumption, STD achieves a better error bound. The theoretical result and performances of STD are numerically verified.}\n}", "pdf": "http://proceedings.mlr.press/v70/imaizumi17a/imaizumi17a.pdf", "supp": "", "pdf_size": 1209313, "gs_citation": 29, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5292513227877130959&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Institute of Statistical Mathematics+National Institute of Advanced Industrial Science and Technology+RIKEN; National Institute of Advanced Industrial Science and Technology+RIKEN", "aff_domain": "hotmail.com; ", "email": "hotmail.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/imaizumi17a.html", "aff_unique_index": "0+1+2;1+2", "aff_unique_norm": "Institute of Statistical Mathematics;National Institute of Advanced Industrial Science and Technology;RIKEN", "aff_unique_dep": ";;", "aff_unique_url": "https://www.ism.ac.jp;https://www.aist.go.jp;https://www.riken.jp", "aff_unique_abbr": "ISM;AIST;RIKEN", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+0+0;0+0", "aff_country_unique": "Japan" }, { "title": "Tensor-Train Recurrent Neural Networks for Video Classification", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/770", "id": "770", "author_site": "Yinchong Yang, Denis Krompass, Volker Tresp", "author": "Yinchong Yang; Denis Krompass; Volker Tresp", "abstract": "The Recurrent Neural Networks and their variants have shown promising performances in sequence modeling tasks such as Natural Language Processing. These models, however, turn out to be impractical and difficult to train when exposed to very high-dimensional inputs due to the large input-to-hidden weight matrix. This may have prevented RNNs\u2019 large-scale application in tasks that involve very high input dimensions such as video modeling; current approaches reduce the input dimensions using various feature extractors. 
To address this challenge, we propose a new, more general and efficient approach by factorizing the input-to-hidden weight matrix using Tensor-Train decomposition which is trained simultaneously with the weights themselves. We test our model on classification tasks using multiple real-world video datasets and achieve competitive performances with state-of-the-art models, even though our model architecture is orders of magnitude less complex. We believe that the proposed approach provides a novel and fundamental building block for modeling high-dimensional sequential data with RNN architectures and opens up many possibilities to transfer the expressive and advanced architectures from other domains such as NLP to modeling high-dimensional sequential data.", "bibtex": "@InProceedings{pmlr-v70-yang17e,\n title = \t {Tensor-Train Recurrent Neural Networks for Video Classification},\n author = {Yinchong Yang and Denis Krompass and Volker Tresp},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3891--3900},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/yang17e/yang17e.pdf},\n url = \t {https://proceedings.mlr.press/v70/yang17e.html},\n abstract = \t {The Recurrent Neural Networks and their variants have shown promising performances in sequence modeling tasks such as Natural Language Processing. These models, however, turn out to be impractical and difficult to train when exposed to very high-dimensional inputs due to the large input-to-hidden weight matrix. This may have prevented RNNs\u2019 large-scale application in tasks that involve very high input dimensions such as video modeling; current approaches reduce the input dimensions using various feature extractors. To address this challenge, we propose a new, more general and efficient approach by factorizing the input-to-hidden weight matrix using Tensor-Train decomposition which is trained simultaneously with the weights themselves. We test our model on classification tasks using multiple real-world video datasets and achieve competitive performances with state-of-the-art models, even though our model architecture is orders of magnitude less complex. 
We believe that the proposed approach provides a novel and fundamental building block for modeling high-dimensional sequential data with RNN architectures and opens up many possibilities to transfer the expressive and advanced architectures from other domains such as NLP to modeling high-dimensional sequential data.}\n}", "pdf": "http://proceedings.mlr.press/v70/yang17e/yang17e.pdf", "supp": "", "pdf_size": 642998, "gs_citation": 311, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7590362166633256800&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Ludwig Maximilian University of Munich, Germany+Siemens AG, Corporate Technology, Germany; Siemens AG, Corporate Technology, Germany; Ludwig Maximilian University of Munich, Germany+Siemens AG, Corporate Technology, Germany", "aff_domain": "siemens.com; ; ", "email": "siemens.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/yang17e.html", "aff_unique_index": "0+1;1;0+1", "aff_unique_norm": "Ludwig Maximilian University of Munich;Siemens AG", "aff_unique_dep": ";Corporate Technology", "aff_unique_url": "https://www.lmu.de;https://www.siemens.com", "aff_unique_abbr": "LMU;Siemens", "aff_campus_unique_index": ";", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0+0", "aff_country_unique": "Germany" }, { "title": "The Loss Surface of Deep and Wide Neural Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/724", "id": "724", "author_site": "Quynh Nguyen, Matthias Hein", "author": "Quynh Nguyen; Matthias Hein", "abstract": "While the optimization problem behind deep neural networks is highly non-convex, it is frequently observed in practice that training deep networks seems possible without getting stuck in suboptimal points. It has been argued that this is the case as all local minima are close to being globally optimal. We show that this is (almost) true, in fact almost all local minima are globally optimal, for a fully connected network with squared loss and analytic activation function given that the number of hidden units of one layer of the network is larger than the number of training points and the network structure from this layer on is pyramidal.", "bibtex": "@InProceedings{pmlr-v70-nguyen17a,\n title = \t {The Loss Surface of Deep and Wide Neural Networks},\n author = {Quynh Nguyen and Matthias Hein},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2603--2612},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/nguyen17a/nguyen17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/nguyen17a.html},\n abstract = \t {While the optimization problem behind deep neural networks is highly non-convex, it is frequently observed in practice that training deep networks seems possible without getting stuck in suboptimal points. It has been argued that this is the case as all local minima are close to being globally optimal. 
We show that this is (almost) true, in fact almost all local minima are globally optimal, for a fully connected network with squared loss and analytic activation function given that the number of hidden units of one layer of the network is larger than the number of training points and the network structure from this layer on is pyramidal.}\n}", "pdf": "http://proceedings.mlr.press/v70/nguyen17a/nguyen17a.pdf", "supp": "", "pdf_size": 284102, "gs_citation": 331, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13485985117698692567&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Department of Mathematics and Computer Science, Saarland University, Germany; Department of Mathematics and Computer Science, Saarland University, Germany", "aff_domain": "cs.uni-saarland.de; ", "email": "cs.uni-saarland.de; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/nguyen17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Saarland University", "aff_unique_dep": "Department of Mathematics and Computer Science", "aff_unique_url": "https://www.uni-saarland.de", "aff_unique_abbr": "Saarland U", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Germany" }, { "title": "The Predictron: End-To-End Learning and Planning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/707", "id": "707", "author_site": "David Silver, Hado van Hasselt, Matteo Hessel, Tom Schaul, Arthur Guez, Tim Harley, Gabriel Dulac-Arnold, David Reichert, Neil Rabinowitz, Andre Barreto, Thomas Degris", "author": "David Silver; Hado Hasselt; Matteo Hessel; Tom Schaul; Arthur Guez; Tim Harley; Gabriel Dulac-Arnold; David Reichert; Neil Rabinowitz; Andre Barreto; Thomas Degris", "abstract": "One of the key challenges of artificial intelligence is to learn models that are effective in the context of planning. In this document we introduce the predictron architecture. The predictron consists of a fully abstract model, represented by a Markov reward process, that can be rolled forward multiple \u201cimagined\u201d planning steps. Each forward pass of the predictron accumulates internal rewards and values over multiple planning depths. The predictron is trained end-to-end so as to make these accumulated values accurately approximate the true value function. We applied the predictron to procedurally generated random mazes and a simulator for the game of pool. The predictron yielded significantly more accurate predictions than conventional deep neural network architectures.", "bibtex": "@InProceedings{pmlr-v70-silver17a,\n title = \t {The Predictron: End-To-End Learning and Planning},\n author = {David Silver and Hado van Hasselt and Matteo Hessel and Tom Schaul and Arthur Guez and Tim Harley and Gabriel Dulac-Arnold and David Reichert and Neil Rabinowitz and Andre Barreto and Thomas Degris},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3191--3199},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/silver17a/silver17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/silver17a.html},\n abstract = \t {One of the key challenges of artificial intelligence is to learn models that are effective in the context of planning. 
In this document we introduce the predictron architecture. The predictron consists of a fully abstract model, represented by a Markov reward process, that can be rolled forward multiple \u201cimagined\u201d planning steps. Each forward pass of the predictron accumulates internal rewards and values over multiple planning depths. The predictron is trained end-to-end so as to make these accumulated values accurately approximate the true value function. We applied the predictron to procedurally generated random mazes and a simulator for the game of pool. The predictron yielded significantly more accurate predictions than conventional deep neural network architectures.}\n}", "pdf": "http://proceedings.mlr.press/v70/silver17a/silver17a.pdf", "supp": "", "pdf_size": 909957, "gs_citation": 327, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=123025585147889247&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 10, "aff": "DeepMind, London; DeepMind, London; DeepMind, London; DeepMind, London; DeepMind, London; DeepMind, London; DeepMind, London; DeepMind, London; DeepMind, London; DeepMind, London; DeepMind, London", "aff_domain": "google.com;google.com;google.com;google.com;google.com; ; ; ; ; ; ", "email": "google.com;google.com;google.com;google.com;google.com; ; ; ; ; ; ", "github": "", "project": "", "author_num": 11, "oa": "https://proceedings.mlr.press/v70/silver17a.html", "aff_unique_index": "0;0;0;0;0;0;0;0;0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "0;0;0;0;0;0;0;0;0;0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "The Price of Differential Privacy for Online Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/582", "id": "582", "author_site": "Naman Agarwal, Karan Singh", "author": "Naman Agarwal; Karan Singh", "abstract": "We design differentially private algorithms for the problem of online linear optimization in the full information and bandit settings with optimal $O(T^{0.5})$ regret bounds. In the full-information setting, our results demonstrate that $\\epsilon$-differential privacy may be ensured for free \u2013 in particular, the regret bounds scale as $O(T^{0.5}+1/\\epsilon)$. For bandit linear optimization, and as a special case, for non-stochastic multi-armed bandits, the proposed algorithm achieves a regret of $O(T^{0.5}/\\epsilon)$, while the previously best known regret bound was $O(T^{2/3}/\\epsilon)$.", "bibtex": "@InProceedings{pmlr-v70-agarwal17a,\n title = \t {The Price of Differential Privacy for Online Learning},\n author = {Naman Agarwal and Karan Singh},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {32--40},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/agarwal17a/agarwal17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/agarwal17a.html},\n abstract = \t {We design differentially private algorithms for the problem of online linear optimization in the full information and bandit settings with optimal $O(T^{0.5})$ regret bounds. 
In the full-information setting, our results demonstrate that $\\epsilon$-differential privacy may be ensured for free \u2013 in particular, the regret bounds scale as $O(T^{0.5}+1/\\epsilon)$. For bandit linear optimization, and as a special case, for non-stochastic multi-armed bandits, the proposed algorithm achieves a regret of $O(T^{0.5}/\\epsilon)$, while the previously best known regret bound was $O(T^{2/3}/\\epsilon)$.}\n}", "pdf": "http://proceedings.mlr.press/v70/agarwal17a/agarwal17a.pdf", "supp": "", "pdf_size": 491857, "gs_citation": 118, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16215934207486870826&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Computer Science, Princeton University, Princeton, NJ, USA; Computer Science, Princeton University, Princeton, NJ, USA", "aff_domain": "cs.princeton.edu;cs.princeton.edu", "email": "cs.princeton.edu;cs.princeton.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/agarwal17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Princeton University", "aff_unique_dep": "Computer Science", "aff_unique_url": "https://www.princeton.edu", "aff_unique_abbr": "Princeton", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Princeton", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "The Sample Complexity of Online One-Class Collaborative Filtering", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/878", "id": "878", "author_site": "Reinhard Heckel, Kannan Ramchandran", "author": "Reinhard Heckel; Kannan Ramchandran", "abstract": "We consider the online one-class collaborative filtering (CF) problem that consists of recommending items to users over time in an online fashion based on positive ratings only. This problem arises when users respond only occasionally to a recommendation with a positive rating, and never with a negative one. We study the impact of the probability of a user responding to a recommendation, $p_f$, on the sample complexity, and ask whether receiving positive and negative ratings, instead of positive ratings only, improves the sample complexity. Both questions arise in the design of recommender systems. We introduce a simple probabilistic user model, and analyze the performance of an online user-based CF algorithm. We prove that after an initial cold start phase, where recommendations are invested in exploring the user\u2019s preferences, this algorithm makes\u2014up to a fraction of the recommendations required for updating the user\u2019s preferences\u2014perfect recommendations. The number of ratings required for the cold start phase is nearly proportional to $1/p_f$, and that for updating the user\u2019s preferences is essentially independent of $p_f$. 
As a consequence we find that receiving positive and negative ratings instead of only positive ones improves the number of ratings required for initial exploration by a factor of $1/p_f$, which can be significant.", "bibtex": "@InProceedings{pmlr-v70-heckel17a,\n title = \t {The Sample Complexity of Online One-Class Collaborative Filtering},\n author = {Reinhard Heckel and Kannan Ramchandran},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1452--1460},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/heckel17a/heckel17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/heckel17a.html},\n abstract = \t {We consider the online one-class collaborative filtering (CF) problem that consists of recommending items to users over time in an online fashion based on positive ratings only. This problem arises when users respond only occasionally to a recommendation with a positive rating, and never with a negative one. We study the impact of the probability of a user responding to a recommendation, $p_f$, on the sample complexity, and ask whether receiving positive and negative ratings, instead of positive ratings only, improves the sample complexity. Both questions arise in the design of recommender systems. We introduce a simple probabilistic user model, and analyze the performance of an online user-based CF algorithm. We prove that after an initial cold start phase, where recommendations are invested in exploring the user\u2019s preferences, this algorithm makes\u2014up to a fraction of the recommendations required for updating the user\u2019s preferences\u2014perfect recommendations. The number of ratings required for the cold start phase is nearly proportional to $1/p_f$, and that for updating the user\u2019s preferences is essentially independent of $p_f$. As a consequence we find that receiving positive and negative ratings instead of only positive ones improves the number of ratings required for initial exploration by a factor of $1/p_f$, which can be significant.}\n}", "pdf": "http://proceedings.mlr.press/v70/heckel17a/heckel17a.pdf", "supp": "", "pdf_size": 423874, "gs_citation": 22, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6437886667339830673&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "University of California, Berkeley, California, USA; University of California, Berkeley, California, USA", "aff_domain": "berkeley.edu; ", "email": "berkeley.edu; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/heckel17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of California, Berkeley", "aff_unique_dep": "", "aff_unique_url": "https://www.berkeley.edu", "aff_unique_abbr": "UC Berkeley", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Berkeley", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "The Shattered Gradients Problem: If resnets are the answer, then what is the question?", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/601", "id": "601", "author_site": "David Balduzzi, Marcus Frean, Wan-Duo Ma, Brian McWilliams, Lennox Leary, John Lewis", "author": "David Balduzzi; Marcus Frean; Lennox Leary; J. P. 
Lewis; Kurt Wan-Duo Ma; Brian McWilliams", "abstract": "A long-standing obstacle to progress in deep learning is the problem of vanishing and exploding gradients. Although, the problem has largely been overcome via carefully constructed initializations and batch normalization, architectures incorporating skip-connections such as highway and resnets perform much better than standard feedforward architectures despite well-chosen initialization and batch normalization. In this paper, we identify the shattered gradients problem. Specifically, we show that the correlation between gradients in standard feedforward networks decays exponentially with depth resulting in gradients that resemble white noise whereas, in contrast, the gradients in architectures with skip-connections are far more resistant to shattering, decaying sublinearly. Detailed empirical evidence is presented in support of the analysis, on both fully-connected networks and convnets. Finally, we present a new \u201clooks linear\u201d (LL) initialization that prevents shattering, with preliminary experiments showing the new initialization allows to train very deep networks without the addition of skip-connections.", "bibtex": "@InProceedings{pmlr-v70-balduzzi17b,\n title = \t {The Shattered Gradients Problem: If resnets are the answer, then what is the question?},\n author = {David Balduzzi and Marcus Frean and Lennox Leary and J. P. Lewis and Kurt Wan-Duo Ma and Brian McWilliams},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {342--350},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/balduzzi17b/balduzzi17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/balduzzi17b.html},\n abstract = \t {A long-standing obstacle to progress in deep learning is the problem of vanishing and exploding gradients. Although, the problem has largely been overcome via carefully constructed initializations and batch normalization, architectures incorporating skip-connections such as highway and resnets perform much better than standard feedforward architectures despite well-chosen initialization and batch normalization. In this paper, we identify the shattered gradients problem. Specifically, we show that the correlation between gradients in standard feedforward networks decays exponentially with depth resulting in gradients that resemble white noise whereas, in contrast, the gradients in architectures with skip-connections are far more resistant to shattering, decaying sublinearly. Detailed empirical evidence is presented in support of the analysis, on both fully-connected networks and convnets. 
Finally, we present a new \u201clooks linear\u201d (LL) initialization that prevents shattering, with preliminary experiments showing the new initialization allows to train very deep networks without the addition of skip-connections.}\n}", "pdf": "http://proceedings.mlr.press/v70/balduzzi17b/balduzzi17b.pdf", "supp": "", "pdf_size": 2616357, "gs_citation": 527, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5158592978697295317&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "Victoria University of Wellington, New Zealand; Victoria University of Wellington, New Zealand; Victoria University of Wellington, New Zealand; Victoria University of Wellington, New Zealand + SEED, Electronic Arts; Victoria University of Wellington, New Zealand; Disney Research, Z\u00fcrich, Switzerland", "aff_domain": "gmail.com; ; ; ; ;disneyresearch.com", "email": "gmail.com; ; ; ; ;disneyresearch.com", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/balduzzi17b.html", "aff_unique_index": "0;0;0;0+1;0;2", "aff_unique_norm": "Victoria University of Wellington;Electronic Arts;Disney Research", "aff_unique_dep": ";SEED;", "aff_unique_url": "https://www.victoria.ac.nz;https://www.ea.com;https://research.disney.com", "aff_unique_abbr": "VUW;EA;Disney Research", "aff_campus_unique_index": ";1", "aff_campus_unique": ";Z\u00fcrich", "aff_country_unique_index": "0;0;0;0+1;0;2", "aff_country_unique": "New Zealand;United States;Switzerland" }, { "title": "The Statistical Recurrent Unit", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/684", "id": "684", "author_site": "Junier Oliva, Barnab\u00e1s P\u00f3czos, Jeff Schneider", "author": "Junier B. Oliva; Barnab\u00e1s P\u00f3czos; Jeff Schneider", "abstract": "Sophisticated gated recurrent neural network architectures like LSTMs and GRUs have been shown to be highly effective in a myriad of applications. We develop an un-gated unit, the statistical recurrent unit (SRU), that is able to learn long term dependencies in data by only keeping moving averages of statistics. The SRU\u2019s architecture is simple, un-gated, and contains a comparable number of parameters to LSTMs; yet, SRUs perform favorably to more sophisticated LSTM and GRU alternatives, often outperforming one or both in various tasks. We show the efficacy of SRUs as compared to LSTMs and GRUs in an unbiased manner by optimizing respective architectures\u2019 hyperparameters for both synthetic and real-world tasks.", "bibtex": "@InProceedings{pmlr-v70-oliva17a,\n title = \t {The Statistical Recurrent Unit},\n author = {Junier B. Oliva and Barnab{\\'a}s P{\\'o}czos and Jeff Schneider},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2671--2680},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/oliva17a/oliva17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/oliva17a.html},\n abstract = \t {Sophisticated gated recurrent neural network architectures like LSTMs and GRUs have been shown to be highly effective in a myriad of applications. We develop an un-gated unit, the statistical recurrent unit (SRU), that is able to learn long term dependencies in data by only keeping moving averages of statistics. 
The SRU\u2019s architecture is simple, un-gated, and contains a comparable number of parameters to LSTMs; yet, SRUs perform favorably to more sophisticated LSTM and GRU alternatives, often outperforming one or both in various tasks. We show the efficacy of SRUs as compared to LSTMs and GRUs in an unbiased manner by optimizing respective architectures\u2019 hyperparameters for both synthetic and real-world tasks.}\n}", "pdf": "http://proceedings.mlr.press/v70/oliva17a/oliva17a.pdf", "supp": "", "pdf_size": 2418741, "gs_citation": 70, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=607413984526287403&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 7, "aff": "Machine Learning Department, Carnegie Mellon University; Machine Learning Department, Carnegie Mellon University; Machine Learning Department, Carnegie Mellon University", "aff_domain": "cs.cmu.edu; ; ", "email": "cs.cmu.edu; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/oliva17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "Carnegie Mellon University", "aff_unique_dep": "Machine Learning Department", "aff_unique_url": "https://www.cmu.edu", "aff_unique_abbr": "CMU", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Theoretical Properties for Neural Networks with Weight Matrices of Low Displacement Rank", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/832", "id": "832", "author_site": "Liang Zhao, Siyu Liao, Yanzhi Wang, Zhe Li, Jian Tang, Bo Yuan", "author": "Liang Zhao; Siyu Liao; Yanzhi Wang; Zhe Li; Jian Tang; Bo Yuan", "abstract": "Recently low displacement rank (LDR) matrices, or so-called structured matrices, have been proposed to compress large-scale neural networks. Empirical results have shown that neural networks with weight matrices of LDR matrices, referred as LDR neural networks, can achieve significant reduction in space and computational complexity while retaining high accuracy. This paper gives theoretical study on LDR neural networks. First, we prove the universal approximation property of LDR neural networks with a mild condition on the displacement operators. We then show that the error bounds of LDR neural networks are as efficient as general neural networks with both single-layer and multiple-layer structure. Finally, we propose back-propagation based training algorithm for general LDR neural networks.", "bibtex": "@InProceedings{pmlr-v70-zhao17b,\n title = \t {Theoretical Properties for Neural Networks with Weight Matrices of Low Displacement Rank},\n author = {Liang Zhao and Siyu Liao and Yanzhi Wang and Zhe Li and Jian Tang and Bo Yuan},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {4082--4090},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zhao17b/zhao17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/zhao17b.html},\n abstract = \t {Recently low displacement rank (LDR) matrices, or so-called structured matrices, have been proposed to compress large-scale neural networks. 
Empirical results have shown that neural networks with weight matrices of LDR matrices, referred as LDR neural networks, can achieve significant reduction in space and computational complexity while retaining high accuracy. This paper gives theoretical study on LDR neural networks. First, we prove the universal approximation property of LDR neural networks with a mild condition on the displacement operators. We then show that the error bounds of LDR neural networks are as efficient as general neural networks with both single-layer and multiple-layer structure. Finally, we propose back-propagation based training algorithm for general LDR neural networks.}\n}", "pdf": "http://proceedings.mlr.press/v70/zhao17b/zhao17b.pdf", "supp": "", "pdf_size": 423461, "gs_citation": 79, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16512618877685276551&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 10, "aff": "The City University of New York, New York, New York, USA+The City University of New York, New York, New York, USA; The City University of New York, New York, New York, USA+The City University of New York, New York, New York, USA; Syracuse University, Syracuse, New York, USA; Syracuse University, Syracuse, New York, USA; Syracuse University, Syracuse, New York, USA; The City University of New York, New York, New York, USA", "aff_domain": "ccny.cuny.edu; ; ; ; ;ccny.cuny.edu", "email": "ccny.cuny.edu; ; ; ; ;ccny.cuny.edu", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/zhao17b.html", "aff_unique_index": "0+0;0+0;1;1;1;0", "aff_unique_norm": "City University of New York;Syracuse University", "aff_unique_dep": ";", "aff_unique_url": "https://www.cuny.edu;https://www.syracuse.edu", "aff_unique_abbr": "CUNY;Syracuse", "aff_campus_unique_index": "0+0;0+0;1;1;1;0", "aff_campus_unique": "New York;Syracuse", "aff_country_unique_index": "0+0;0+0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Tight Bounds for Approximate Carath\u00e9odory and Beyond", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/467", "id": "467", "author_site": "Vahab Mirrokni, Renato Leme, Adrian Vladu, Sam Wong", "author": "Vahab Mirrokni; Renato Paes Leme; Adrian Vladu; Sam Chiu-wai Wong", "abstract": "We present a deterministic nearly-linear time algorithm for approximating any point inside a convex polytope with a sparse convex combination of the polytope\u2019s vertices. Our result provides a constructive proof for the Approximate Carath\u00e9odory Problem, which states that any point inside a polytope contained in the $\\ell_p$ ball of radius $D$ can be approximated to within $\\epsilon$ in $\\ell_p$ norm by a convex combination of $O\\left(D^2 p/\\epsilon^2\\right)$ vertices of the polytope for $p \\geq 2$. While for the particular case of $p=2$, this can be achieved by the well-known Perceptron algorithm, we follow a more principled approach which generalizes to arbitrary $p\\geq 2$; furthermore, this naturally extends to domains with more complicated geometry, as it is the case for providing an approximate Birkhoff-von Neumann decomposition. Secondly, we show that the sparsity bound is tight for $\\ell_p$ norms, using an argument based on anti-concentration for the binomial distribution, thus resolving an open question posed by Barman. Experimentally, we verify that our deterministic optimization-based algorithms achieve in practice much better sparsity than previously known sampling-based algorithms. 
We also show how to apply our techniques to SVM training and rounding fractional points in matroid and flow polytopes.", "bibtex": "@InProceedings{pmlr-v70-mirrokni17a,\n title = \t {Tight Bounds for Approximate {C}arath{\\'e}odory and Beyond},\n author = {Vahab Mirrokni and Renato Paes Leme and Adrian Vladu and Sam Chiu-wai Wong},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2440--2448},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/mirrokni17a/mirrokni17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/mirrokni17a.html},\n abstract = \t {We present a deterministic nearly-linear time algorithm for approximating any point inside a convex polytope with a sparse convex combination of the polytope\u2019s vertices. Our result provides a constructive proof for the Approximate Carath\u00e9odory Problem, which states that any point inside a polytope contained in the $\\ell_p$ ball of radius $D$ can be approximated to within $\\epsilon$ in $\\ell_p$ norm by a convex combination of $O\\left(D^2 p/\\epsilon^2\\right)$ vertices of the polytope for $p \\geq 2$. While for the particular case of $p=2$, this can be achieved by the well-known Perceptron algorithm, we follow a more principled approach which generalizes to arbitrary $p\\geq 2$; furthermore, this naturally extends to domains with more complicated geometry, as it is the case for providing an approximate Birkhoff-von Neumann decomposition. Secondly, we show that the sparsity bound is tight for $\\ell_p$ norms, using an argument based on anti-concentration for the binomial distribution, thus resolving an open question posed by Barman. Experimentally, we verify that our deterministic optimization-based algorithms achieve in practice much better sparsity than previously known sampling-based algorithms. 
We also show how to apply our techniques to SVM training and rounding fractional points in matroid and flow polytopes.}\n}", "pdf": "http://proceedings.mlr.press/v70/mirrokni17a/mirrokni17a.pdf", "supp": "", "pdf_size": 773154, "gs_citation": 41, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=818601346488204756&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Google Research, New York, NY, USA; Google Research, New York, NY, USA; MIT, Cambridge, MA, USA; UC Berkeley, Berkeley, CA, USA", "aff_domain": "google.com;google.com;mit.edu;berkeley.edu", "email": "google.com;google.com;mit.edu;berkeley.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/mirrokni17a.html", "aff_unique_index": "0;0;1;2", "aff_unique_norm": "Google;Massachusetts Institute of Technology;University of California, Berkeley", "aff_unique_dep": "Google Research;;", "aff_unique_url": "https://research.google;https://web.mit.edu;https://www.berkeley.edu", "aff_unique_abbr": "Google Research;MIT;UC Berkeley", "aff_campus_unique_index": "0;0;1;2", "aff_campus_unique": "New York;Cambridge;Berkeley", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Toward Controlled Generation of Text", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/816", "id": "816", "author_site": "Zhiting Hu, Zichao Yang, Xiaodan Liang, Ruslan Salakhutdinov, Eric Xing", "author": "Zhiting Hu; Zichao Yang; Xiaodan Liang; Ruslan Salakhutdinov; Eric P. Xing", "abstract": "Generic generation and manipulation of text is challenging and has limited success compared to recent deep generative modeling in visual domain. This paper aims at generating plausible text sentences, whose attributes are controlled by learning disentangled latent representations with designated semantics. We propose a new neural generative model which combines variational auto-encoders (VAEs) and holistic attribute discriminators for effective imposition of semantic structures. The model can alternatively be seen as enhancing VAEs with the wake-sleep algorithm for leveraging fake samples as extra training data. With differentiable approximation to discrete text samples, explicit constraints on independent attribute controls, and efficient collaborative learning of generator and discriminators, our model learns interpretable representations from even only word annotations, and produces short sentences with desired attributes of sentiment and tenses. Quantitative experiments using trained classifiers as evaluators validate the accuracy of sentence and attribute generation.", "bibtex": "@InProceedings{pmlr-v70-hu17e,\n title = \t {Toward Controlled Generation of Text},\n author = {Zhiting Hu and Zichao Yang and Xiaodan Liang and Ruslan Salakhutdinov and Eric P. Xing},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1587--1596},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/hu17e/hu17e.pdf},\n url = \t {https://proceedings.mlr.press/v70/hu17e.html},\n abstract = \t {Generic generation and manipulation of text is challenging and has limited success compared to recent deep generative modeling in visual domain. 
This paper aims at generating plausible text sentences, whose attributes are controlled by learning disentangled latent representations with designated semantics. We propose a new neural generative model which combines variational auto-encoders (VAEs) and holistic attribute discriminators for effective imposition of semantic structures. The model can alternatively be seen as enhancing VAEs with the wake-sleep algorithm for leveraging fake samples as extra training data. With differentiable approximation to discrete text samples, explicit constraints on independent attribute controls, and efficient collaborative learning of generator and discriminators, our model learns interpretable representations from even only word annotations, and produces short sentences with desired attributes of sentiment and tenses. Quantitative experiments using trained classifiers as evaluators validate the accuracy of sentence and attribute generation.}\n}", "pdf": "http://proceedings.mlr.press/v70/hu17e/hu17e.pdf", "supp": "", "pdf_size": 473724, "gs_citation": 1212, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14533919283203963154&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Carnegie Mellon University+Petuum, Inc.; Carnegie Mellon University; Carnegie Mellon University+Petuum, Inc.; Carnegie Mellon University; Carnegie Mellon University+Petuum, Inc.", "aff_domain": "cs.cmu.edu; ; ; ; ", "email": "cs.cmu.edu; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/hu17e.html", "aff_unique_index": "0+1;0;0+1;0;0+1", "aff_unique_norm": "Carnegie Mellon University;Petuum, Inc.", "aff_unique_dep": ";", "aff_unique_url": "https://www.cmu.edu;https://www.petuum.com", "aff_unique_abbr": "CMU;Petuum", "aff_campus_unique_index": ";;", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0+0;0;0+0", "aff_country_unique": "United States" }, { "title": "Toward Efficient and Accurate Covariance Matrix Estimation on Compressed Data", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/473", "id": "473", "author_site": "Xixian Chen, Michael Lyu, Irwin King", "author": "Xixian Chen; Michael R. Lyu; Irwin King", "abstract": "Estimating covariance matrices is a fundamental technique in various domains, most notably in machine learning and signal processing. To tackle the challenges of extensive communication costs, large storage capacity requirements, and high processing time complexity when handling massive high-dimensional and distributed data, we propose an efficient and accurate covariance matrix estimation method via data compression. In contrast to previous data-oblivious compression schemes, we leverage a data-aware weighted sampling method to construct low-dimensional data for such estimation. We rigorously prove that our proposed estimator is unbiased and requires smaller data to achieve the same accuracy with specially designed sampling distributions. Besides, we depict that the computational procedures in our algorithm are efficient. All achievements imply an improved tradeoff between the estimation accuracy and computational costs. Finally, the extensive experiments on synthetic and real-world datasets validate the superior property of our method and illustrate that it significantly outperforms the state-of-the-art algorithms.", "bibtex": "@InProceedings{pmlr-v70-chen17g,\n title = \t {Toward Efficient and Accurate Covariance Matrix Estimation on Compressed Data},\n author = {Xixian Chen and Michael R. 
Lyu and Irwin King},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {767--776},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/chen17g/chen17g.pdf},\n url = \t {https://proceedings.mlr.press/v70/chen17g.html},\n abstract = \t {Estimating covariance matrices is a fundamental technique in various domains, most notably in machine learning and signal processing. To tackle the challenges of extensive communication costs, large storage capacity requirements, and high processing time complexity when handling massive high-dimensional and distributed data, we propose an efficient and accurate covariance matrix estimation method via data compression. In contrast to previous data-oblivious compression schemes, we leverage a data-aware weighted sampling method to construct low-dimensional data for such estimation. We rigorously prove that our proposed estimator is unbiased and requires smaller data to achieve the same accuracy with specially designed sampling distributions. Besides, we depict that the computational procedures in our algorithm are efficient. All achievements imply an improved tradeoff between the estimation accuracy and computational costs. Finally, the extensive experiments on synthetic and real-world datasets validate the superior property of our method and illustrate that it significantly outperforms the state-of-the-art algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v70/chen17g/chen17g.pdf", "supp": "", "pdf_size": 594673, "gs_citation": 20, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15926173271568396684&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Shenzhen Research Institute, The Chinese University of Hong Kong, Shenzhen, China + Department of Computer Science and Engineering, The Chinese University of Hong Kong, Shatin, N.T., Hong Kong; Shenzhen Research Institute, The Chinese University of Hong Kong, Shenzhen, China + Department of Computer Science and Engineering, The Chinese University of Hong Kong, Shatin, N.T., Hong Kong; Shenzhen Research Institute, The Chinese University of Hong Kong, Shenzhen, China + Department of Computer Science and Engineering, The Chinese University of Hong Kong, Shatin, N.T., Hong Kong", "aff_domain": "cse.cuhk.edu.hk;cse.cuhk.edu.hk;cse.cuhk.edu.hk", "email": "cse.cuhk.edu.hk;cse.cuhk.edu.hk;cse.cuhk.edu.hk", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/chen17g.html", "aff_unique_index": "0+0;0+0;0+0", "aff_unique_norm": "Chinese University of Hong Kong", "aff_unique_dep": "Shenzhen Research Institute", "aff_unique_url": "https://www.cuhk.edu.cn", "aff_unique_abbr": "CUHK", "aff_campus_unique_index": "0+1;0+1;0+1", "aff_campus_unique": "Shenzhen;Hong Kong SAR", "aff_country_unique_index": "0+0;0+0;0+0", "aff_country_unique": "China" }, { "title": "Towards K-means-friendly Spaces: Simultaneous Deep Learning and Clustering", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/738", "id": "738", "author_site": "Bo Yang, Xiao Fu, Nicholas Sidiropoulos, Mingyi Hong", "author": "Bo Yang; Xiao Fu; Nicholas D. 
Sidiropoulos; Mingyi Hong", "abstract": "Most learning approaches treat dimensionality reduction (DR) and clustering separately (i.e., sequentially), but recent research has shown that optimizing the two tasks jointly can substantially improve the performance of both. The premise behind the latter genre is that the data samples are obtained via linear transformation of latent representations that are easy to cluster; but in practice, the transformation from the latent space to the data can be more complicated. In this work, we assume that this transformation is an unknown and possibly nonlinear function. To recover the `clustering-friendly\u2019 latent representations and to better cluster the data, we propose a joint DR and K-means clustering approach in which DR is accomplished via learning a deep neural network (DNN). The motivation is to keep the advantages of jointly optimizing the two tasks, while exploiting the deep neural network\u2019s ability to approximate any nonlinear function. This way, the proposed approach can work well for a broad class of generative models. Towards this end, we carefully design the DNN structure and the associated joint optimization criterion, and propose an effective and scalable algorithm to handle the formulated optimization problem. Experiments using different real datasets are employed to showcase the effectiveness of the proposed approach.", "bibtex": "@InProceedings{pmlr-v70-yang17b,\n title = \t {Towards K-means-friendly Spaces: Simultaneous Deep Learning and Clustering},\n author = {Bo Yang and Xiao Fu and Nicholas D. Sidiropoulos and Mingyi Hong},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3861--3870},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/yang17b/yang17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/yang17b.html},\n abstract = \t {Most learning approaches treat dimensionality reduction (DR) and clustering separately (i.e., sequentially), but recent research has shown that optimizing the two tasks jointly can substantially improve the performance of both. The premise behind the latter genre is that the data samples are obtained via linear transformation of latent representations that are easy to cluster; but in practice, the transformation from the latent space to the data can be more complicated. In this work, we assume that this transformation is an unknown and possibly nonlinear function. To recover the `clustering-friendly\u2019 latent representations and to better cluster the data, we propose a joint DR and K-means clustering approach in which DR is accomplished via learning a deep neural network (DNN). The motivation is to keep the advantages of jointly optimizing the two tasks, while exploiting the deep neural network\u2019s ability to approximate any nonlinear function. This way, the proposed approach can work well for a broad class of generative models. Towards this end, we carefully design the DNN structure and the associated joint optimization criterion, and propose an effective and scalable algorithm to handle the formulated optimization problem. 
Experiments using different real datasets are employed to showcase the effectiveness of the proposed approach.}\n}", "pdf": "http://proceedings.mlr.press/v70/yang17b/yang17b.pdf", "supp": "", "pdf_size": 872341, "gs_citation": 1220, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6378727589755380003&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Department of Electrical and Computer Engineering, University of Minnesota, Minneapolis MN 55455, USA; Department of Electrical and Computer Engineering, University of Minnesota, Minneapolis MN 55455, USA; Department of Electrical and Computer Engineering, University of Minnesota, Minneapolis MN 55455, USA; Department of Industrial and Manufacturing Systems Engineering, Iowa State University, Ames, IA 50011, USA", "aff_domain": "umn.edu;umn.edu;ece.um.edu;iastate.edu", "email": "umn.edu;umn.edu;ece.um.edu;iastate.edu", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/yang17b.html", "aff_unique_index": "0;0;0;1", "aff_unique_norm": "University of Minnesota;Iowa State University", "aff_unique_dep": "Department of Electrical and Computer Engineering;Department of Industrial and Manufacturing Systems Engineering", "aff_unique_url": "https://www.umn.edu;https://www.iastate.edu", "aff_unique_abbr": "UMN;ISU", "aff_campus_unique_index": "0;0;0;1", "aff_campus_unique": "Minneapolis;Ames", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Tunable Efficient Unitary Neural Networks (EUNN) and their application to RNNs", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/813", "id": "813", "author_site": "Li Jing, Yichen Shen, Tena Dubcek, John E Peurifoy, Scott Skirlo, Yann LeCun, Max Tegmark, Marin Solja\\v{c}i\\'{c}", "author": "Li Jing; Yichen Shen; Tena Dubcek; John Peurifoy; Scott Skirlo; Yann LeCun; Max Tegmark; Marin Solja\u010di\u0107", "abstract": "Using unitary (instead of general) matrices in artificial neural networks (ANNs) is a promising way to solve the gradient explosion/vanishing problem, as well as to enable ANNs to learn long-term correlations in the data. This approach appears particularly promising for Recurrent Neural Networks (RNNs). In this work, we present a new architecture for implementing an Efficient Unitary Neural Network (EUNNs); its main advantages can be summarized as follows. Firstly, the representation capacity of the unitary space in an EUNN is fully tunable, ranging from a subspace of SU(N) to the entire unitary space. Secondly, the computational complexity for training an EUNN is merely $\\mathcal{O}(1)$ per parameter. Finally, we test the performance of EUNNs on the standard copying task, the pixel-permuted MNIST digit recognition benchmark as well as the Speech Prediction Test (TIMIT). We find that our architecture significantly outperforms both other state-of-the-art unitary RNNs and the LSTM architecture, in terms of the final performance and/or the wall-clock training speed. 
EUNNs are thus promising alternatives to RNNs and LSTMs for a wide variety of applications.", "bibtex": "@InProceedings{pmlr-v70-jing17a,\n title = \t {Tunable Efficient Unitary Neural Networks ({EUNN}) and their application to {RNN}s},\n author = {Li Jing and Yichen Shen and Tena Dubcek and John Peurifoy and Scott Skirlo and Yann LeCun and Max Tegmark and Marin Solja{\\v{c}}i{\\'c}},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1733--1741},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/jing17a/jing17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/jing17a.html},\n abstract = \t {Using unitary (instead of general) matrices in artificial neural networks (ANNs) is a promising way to solve the gradient explosion/vanishing problem, as well as to enable ANNs to learn long-term correlations in the data. This approach appears particularly promising for Recurrent Neural Networks (RNNs). In this work, we present a new architecture for implementing an Efficient Unitary Neural Network (EUNNs); its main advantages can be summarized as follows. Firstly, the representation capacity of the unitary space in an EUNN is fully tunable, ranging from a subspace of SU(N) to the entire unitary space. Secondly, the computational complexity for training an EUNN is merely $\\mathcal{O}(1)$ per parameter. Finally, we test the performance of EUNNs on the standard copying task, the pixel-permuted MNIST digit recognition benchmark as well as the Speech Prediction Test (TIMIT). We find that our architecture significantly outperforms both other state-of-the-art unitary RNNs and the LSTM architecture, in terms of the final performance and/or the wall-clock training speed. EUNNs are thus promising alternatives to RNNs and LSTMs for a wide variety of applications.}\n}", "pdf": "http://proceedings.mlr.press/v70/jing17a/jing17a.pdf", "supp": "", "pdf_size": 2403083, "gs_citation": 232, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16889639081296570861&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 12, "aff": ";;;;;;;", "aff_domain": ";;;;;;;", "email": ";;;;;;;", "github": "", "project": "", "author_num": 8, "oa": "https://proceedings.mlr.press/v70/jing17a.html" }, { "title": "Uncertainty Assessment and False Discovery Rate Control in High-Dimensional Granger Causal Inference", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/479", "id": "479", "author_site": "Aditya Chaudhry, Pan Xu, Quanquan Gu", "author": "Aditya Chaudhry; Pan Xu; Quanquan Gu", "abstract": "Causal inference among high-dimensional time series data proves an important research problem in many fields. While in the classical regime one often establishes causality among time series via a concept known as \u201cGranger causality,\u201d existing approaches for Granger causal inference in high-dimensional data lack the means to characterize the uncertainty associated with Granger causality estimates (e.g., p-values and confidence intervals). We make two contributions in this work. First, we introduce a novel asymptotically unbiased Granger causality estimator with corresponding test statistics and confidence intervals to allow, for the first time, uncertainty characterization in high-dimensional Granger causal inference. 
Second, we introduce a novel method for false discovery rate control that achieves higher power in multiple testing than existing techniques and that can cope with dependent test statistics and dependent observations. We corroborate our theoretical results with experiments on both synthetic data and real-world climatological data.", "bibtex": "@InProceedings{pmlr-v70-chaudhry17a,\n title = \t {Uncertainty Assessment and False Discovery Rate Control in High-Dimensional {G}ranger Causal Inference},\n author = {Aditya Chaudhry and Pan Xu and Quanquan Gu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {684--693},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/chaudhry17a/chaudhry17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/chaudhry17a.html},\n abstract = \t {Causal inference among high-dimensional time series data proves an important research problem in many fields. While in the classical regime one often establishes causality among time series via a concept known as \u201cGranger causality,\u201d existing approaches for Granger causal inference in high-dimensional data lack the means to characterize the uncertainty associated with Granger causality estimates (e.g., p-values and confidence intervals). We make two contributions in this work. First, we introduce a novel asymptotically unbiased Granger causality estimator with corresponding test statistics and confidence intervals to allow, for the first time, uncertainty characterization in high-dimensional Granger causal inference. Second, we introduce a novel method for false discovery rate control that achieves higher power in multiple testing than existing techniques and that can cope with dependent test statistics and dependent observations. We corroborate our theoretical results with experiments on both synthetic data and real-world climatological data.}\n}", "pdf": "http://proceedings.mlr.press/v70/chaudhry17a/chaudhry17a.pdf", "supp": "", "pdf_size": 839790, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=32630767836162440&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Department of Mathematics, University of Virginia, Charlottesville, VA 22904, USA; Department of Computer Science, University of Virginia, Charlottesville, VA 22904, USA; Department of Computer Science, University of Virginia, Charlottesville, VA 22904, USA", "aff_domain": "virginia.edu; ;virginia.edu", "email": "virginia.edu; ;virginia.edu", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/chaudhry17a.html", "aff_unique_index": "0;0;0", "aff_unique_norm": "University of Virginia", "aff_unique_dep": "Department of Mathematics", "aff_unique_url": "https://www.virginia.edu", "aff_unique_abbr": "UVA", "aff_campus_unique_index": "0;0;0", "aff_campus_unique": "Charlottesville", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Uncorrelation and Evenness: a New Diversity-Promoting Regularizer", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/491", "id": "491", "author_site": "Pengtao Xie, Aarti Singh, Eric Xing", "author": "Pengtao Xie; Aarti Singh; Eric P. 
Xing", "abstract": "Latent space models (LSMs) provide a principled and effective way to extract hidden patterns from observed data. To cope with two challenges in LSMs: (1) how to capture infrequent patterns when pattern frequency is imbalanced and (2) how to reduce model size without sacrificing their expressiveness, several studies have been proposed to \u201cdiversify\u201d LSMs, which design regularizers to encourage the components therein to be \u201cdiverse\u201d. In light of the limitations of existing approaches, we design a new diversity-promoting regularizer by considering two factors: uncorrelation and evenness, which encourage the components to be uncorrelated and to play equally important roles in modeling data. Formally, this amounts to encouraging the covariance matrix of the components to have more uniform eigenvalues. We apply the regularizer to two LSMs and develop an efficient optimization algorithm. Experiments on healthcare, image and text data demonstrate the effectiveness of the regularizer.", "bibtex": "@InProceedings{pmlr-v70-xie17b,\n title = \t {Uncorrelation and Evenness: a New Diversity-Promoting Regularizer},\n author = {Pengtao Xie and Aarti Singh and Eric P. Xing},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3811--3820},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/xie17b/xie17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/xie17b.html},\n abstract = \t {Latent space models (LSMs) provide a principled and effective way to extract hidden patterns from observed data. To cope with two challenges in LSMs: (1) how to capture infrequent patterns when pattern frequency is imbalanced and (2) how to reduce model size without sacrificing their expressiveness, several studies have been proposed to \u201cdiversify\u201d LSMs, which design regularizers to encourage the components therein to be \u201cdiverse\u201d. In light of the limitations of existing approaches, we design a new diversity-promoting regularizer by considering two factors: uncorrelation and evenness, which encourage the components to be uncorrelated and to play equally important roles in modeling data. Formally, this amounts to encouraging the covariance matrix of the components to have more uniform eigenvalues. We apply the regularizer to two LSMs and develop an efficient optimization algorithm. 
Experiments on healthcare, image and text data demonstrate the effectiveness of the regularizer.}\n}", "pdf": "http://proceedings.mlr.press/v70/xie17b/xie17b.pdf", "supp": "", "pdf_size": 763775, "gs_citation": 45, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4875274129428893296&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Machine Learning Department, Carnegie Mellon University+Petuum Inc.; Machine Learning Department, Carnegie Mellon University; Petuum Inc.", "aff_domain": "cs.cmu.edu; ;petuum.com", "email": "cs.cmu.edu; ;petuum.com", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/xie17b.html", "aff_unique_index": "0+1;0;1", "aff_unique_norm": "Carnegie Mellon University;Petuum Inc.", "aff_unique_dep": "Machine Learning Department;", "aff_unique_url": "https://www.cmu.edu;https://www.petuum.com", "aff_unique_abbr": "CMU;", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0;0", "aff_country_unique": "United States" }, { "title": "Uncovering Causality from Multivariate Hawkes Integrated Cumulants", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/673", "id": "673", "author_site": "Massil Achab, Emmanuel Bacry, St\u00e9phane Ga\u00efffas, Iacopo Mastromatteo, Jean-Fran\u00e7ois Muzy", "author": "Massil Achab; Emmanuel Bacry; St\u00e9phane Ga\u0131\u0308ffas; Iacopo Mastromatteo; Jean-Fran\u00e7ois Muzy", "abstract": "We design a new nonparametric method that allows one to estimate the matrix of integrated kernels of a multivariate Hawkes process. This matrix not only encodes the mutual influences of each node of the process, but also disentangles the causality relationships between them. Our approach is the first that leads to an estimation of this matrix", "bibtex": "@InProceedings{pmlr-v70-achab17a,\n title = \t {Uncovering Causality from Multivariate {H}awkes Integrated Cumulants},\n author = {Massil Achab and Emmanuel Bacry and St{\\'e}phane Ga\\\"{\\i}ffas and Iacopo Mastromatteo and Jean-Fran{\\c{c}}ois Muzy},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1--10},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/achab17a/achab17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/achab17a.html},\n abstract = \t {We design a new nonparametric method that allows one to estimate the matrix of integrated kernels of a multivariate Hawkes process. This matrix not only encodes the mutual influences of each node of the process, but also disentangles the causality relationships between them. 
Our approach is the first that leads to an estimation of this matrix", "pdf": "http://proceedings.mlr.press/v70/achab17a/achab17a.pdf", "supp": "", "pdf_size": 582895, "gs_citation": 121, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16886737489805400177&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Ecole Polytechnique, Palaiseau, France; Ecole Polytechnique, Palaiseau, France; Ecole Polytechnique, Palaiseau, France; Capital Fund Management, Paris, France; Universit\u00e9 de Corse, Corte, France", "aff_domain": "m4x.org; ; ; ; ", "email": "m4x.org; ; ; ; ", "github": "", "project": "", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/achab17a.html", "aff_unique_index": "0;0;0;1;2", "aff_unique_norm": "Ecole Polytechnique;Capital Fund Management;Universit\u00e9 de Corse", "aff_unique_dep": ";;", "aff_unique_url": "https://www.ec-polytechnique.fr;;https://www.universitedecorse.fr", "aff_unique_abbr": "X;;", "aff_campus_unique_index": "0;0;0;2", "aff_campus_unique": "Palaiseau;;Corte", "aff_country_unique_index": "0;0;0;0;0", "aff_country_unique": "France" }, { "title": "Understanding Black-box Predictions via Influence Functions", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/678", "id": "678", "author_site": "Pang Wei Koh, Percy Liang", "author": "Pang Wei Koh; Percy Liang", "abstract": "How can we explain the predictions of a black-box model? In this paper, we use influence functions \u2014 a classic technique from robust statistics \u2014 to trace a model\u2019s prediction through the learning algorithm and back to its training data, thereby identifying training points most responsible for a given prediction. To scale up influence functions to modern machine learning settings, we develop a simple, efficient implementation that requires only oracle access to gradients and Hessian-vector products. We show that even on non-convex and non-differentiable models where the theory breaks down, approximations to influence functions can still provide valuable information. On linear models and convolutional neural networks, we demonstrate that influence functions are useful for multiple purposes: understanding model behavior, debugging models, detecting dataset errors, and even creating visually-indistinguishable training-set attacks.", "bibtex": "@InProceedings{pmlr-v70-koh17a,\n title = \t {Understanding Black-box Predictions via Influence Functions},\n author = {Pang Wei Koh and Percy Liang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1885--1894},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/koh17a/koh17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/koh17a.html},\n abstract = \t {How can we explain the predictions of a black-box model? In this paper, we use influence functions \u2014 a classic technique from robust statistics \u2014 to trace a model\u2019s prediction through the learning algorithm and back to its training data, thereby identifying training points most responsible for a given prediction. To scale up influence functions to modern machine learning settings, we develop a simple, efficient implementation that requires only oracle access to gradients and Hessian-vector products. 
We show that even on non-convex and non-differentiable models where the theory breaks down, approximations to influence functions can still provide valuable information. On linear models and convolutional neural networks, we demonstrate that influence functions are useful for multiple purposes: understanding model behavior, debugging models, detecting dataset errors, and even creating visually-indistinguishable training-set attacks.}\n}", "pdf": "http://proceedings.mlr.press/v70/koh17a/koh17a.pdf", "supp": "", "pdf_size": 4815350, "gs_citation": 3615, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3459384850898992895&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 20, "aff": "Stanford University; Stanford University", "aff_domain": "cs.stanford.edu;cs.stanford.edu", "email": "cs.stanford.edu;cs.stanford.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/koh17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Understanding Synthetic Gradients and Decoupled Neural Interfaces", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/719", "id": "719", "author_site": "Wojciech Czarnecki, Grzegorz \u015awirszcz, Max Jaderberg, Simon Osindero, Oriol Vinyals, Koray Kavukcuoglu", "author": "Wojciech Marian Czarnecki; Grzegorz \u015awirszcz; Max Jaderberg; Simon Osindero; Oriol Vinyals; Koray Kavukcuoglu", "abstract": "When training neural networks, the use of Synthetic Gradients (SG) allows layers or modules to be trained without update locking \u2013 without waiting for a true error gradient to be backpropagated \u2013 resulting in Decoupled Neural Interfaces (DNIs). This unlocked ability of being able to update parts of a neural network asynchronously and with only local information was demonstrated to work empirically in Jaderberg et al (2016). However, there has been very little demonstration of what changes DNIs and SGs impose from a functional, representational, and learning dynamics point of view. In this paper, we study DNIs through the use of synthetic gradients on feed-forward networks to better understand their behaviour and elucidate their effect on optimisation. We show that the incorporation of SGs does not affect the representational strength of the learning system for a neural network, and prove the convergence of the learning system for linear and deep linear models. On practical problems we investigate the mechanism by which synthetic gradient estimators approximate the true loss, and, surprisingly, how that leads to drastically different layer-wise representations. 
Finally, we also expose the relationship of using synthetic gradients to other error approximation techniques and find a unifying language for discussion and comparison.", "bibtex": "@InProceedings{pmlr-v70-czarnecki17a,\n title = \t {Understanding Synthetic Gradients and Decoupled Neural Interfaces},\n author = {Wojciech Marian Czarnecki and Grzegorz {\\'{S}}wirszcz and Max Jaderberg and Simon Osindero and Oriol Vinyals and Koray Kavukcuoglu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {904--912},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/czarnecki17a/czarnecki17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/czarnecki17a.html},\n abstract = \t {When training neural networks, the use of Synthetic Gradients (SG) allows layers or modules to be trained without update locking \u2013 without waiting for a true error gradient to be backpropagated \u2013 resulting in Decoupled Neural Interfaces (DNIs). This unlocked ability of being able to update parts of a neural network asynchronously and with only local information was demonstrated to work empirically in Jaderberg et al (2016). However, there has been very little demonstration of what changes DNIs and SGs impose from a functional, representational, and learning dynamics point of view. In this paper, we study DNIs through the use of synthetic gradients on feed-forward networks to better understand their behaviour and elucidate their effect on optimisation. We show that the incorporation of SGs does not affect the representational strength of the learning system for a neural network, and prove the convergence of the learning system for linear and deep linear models. On practical problems we investigate the mechanism by which synthetic gradient estimators approximate the true loss, and, surprisingly, how that leads to drastically different layer-wise representations. 
Finally, we also expose the relationship of using synthetic gradients to other error approximation techniques and find a unifying language for discussion and comparison.}\n}", "pdf": "http://proceedings.mlr.press/v70/czarnecki17a/czarnecki17a.pdf", "supp": "", "pdf_size": 966135, "gs_citation": 94, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3057918163227185685&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom; DeepMind, London, United Kingdom", "aff_domain": "google.com; ; ; ; ; ", "email": "google.com; ; ; ; ; ", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/czarnecki17a.html", "aff_unique_index": "0;0;0;0;0;0", "aff_unique_norm": "DeepMind", "aff_unique_dep": "", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "0;0;0;0;0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0;0;0;0;0", "aff_country_unique": "United Kingdom" }, { "id": "e8550b0960", "title": "Understanding the Representation and Computation of Multilayer Perceptrons: A Case Study in Speech Recognition", "site": "https://proceedings.mlr.press/v70/nagamine17a.html", "author": "Tasha Nagamine; Nima Mesgarani", "abstract": "Despite the recent success of deep learning, the nature of the transformations they apply to the input features remains poorly understood. This study provides an empirical framework to study the encoding properties of node activations in various layers of the network, and to construct the exact function applied to each data point in the form of a linear transform. These methods are used to discern and quantify properties of feed-forward neural networks trained to map acoustic features to phoneme labels. We show a selective and nonlinear warping of the feature space, achieved by forming prototypical functions to account for the possible variation of each class. This study provides a joint framework where the properties of node activations and the functions implemented by the network can be linked together.", "bibtex": "@InProceedings{pmlr-v70-nagamine17a,\n title = \t {Understanding the Representation and Computation of Multilayer Perceptrons: A Case Study in Speech Recognition},\n author = {Tasha Nagamine and Nima Mesgarani},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2564--2573},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/nagamine17a/nagamine17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/nagamine17a.html},\n abstract = \t {Despite the recent success of deep learning, the nature of the transformations they apply to the input features remains poorly understood. This study provides an empirical framework to study the encoding properties of node activations in various layers of the network, and to construct the exact function applied to each data point in the form of a linear transform. These methods are used to discern and quantify properties of feed-forward neural networks trained to map acoustic features to phoneme labels. 
We show a selective and nonlinear warping of the feature space, achieved by forming prototypical functions to account for the possible variation of each class. This study provides a joint framework where the properties of node activations and the functions implemented by the network can be linked together.}\n}", "pdf": "http://proceedings.mlr.press/v70/nagamine17a/nagamine17a.pdf", "supp": "", "pdf_size": 3944017, "gs_citation": 40, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14497836121228043361&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Columbia University, New York, NY, USA; Columbia University, New York, NY, USA", "aff_domain": "ee.columbia.edu;ee.columbia.edu", "email": "ee.columbia.edu;ee.columbia.edu", "github": "", "project": "", "author_num": 2, "aff_unique_index": "0;0", "aff_unique_norm": "Columbia University", "aff_unique_dep": "", "aff_unique_url": "https://www.columbia.edu", "aff_unique_abbr": "Columbia", "aff_campus_unique_index": "0;0", "aff_campus_unique": "New York", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Uniform Convergence Rates for Kernel Density Estimation", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/545", "id": "545", "author": "Heinrich Jiang", "abstract": "Kernel density estimation (KDE) is a popular nonparametric density estimation method. We (1) derive finite-sample high-probability density estimation bounds for multivariate KDE under mild density assumptions which hold uniformly in $x \\in \\mathbb{R}^d$ and bandwidth matrices. We apply these results to (2) mode, (3) density level set, and (4) class probability estimation and attain optimal rates up to logarithmic factors. We then (5) provide an extension of our results under the manifold hypothesis. Finally, we (6) give uniform convergence results for local intrinsic dimension estimation.", "bibtex": "@InProceedings{pmlr-v70-jiang17b,\n title = \t {Uniform Convergence Rates for Kernel Density Estimation},\n author = {Heinrich Jiang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1694--1703},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/jiang17b/jiang17b.pdf},\n url = \t {https://proceedings.mlr.press/v70/jiang17b.html},\n abstract = \t {Kernel density estimation (KDE) is a popular nonparametric density estimation method. We (1) derive finite-sample high-probability density estimation bounds for multivariate KDE under mild density assumptions which hold uniformly in $x \\in \\mathbb{R}^d$ and bandwidth matrices. We apply these results to (2) mode, (3) density level set, and (4) class probability estimation and attain optimal rates up to logarithmic factors. We then (5) provide an extension of our results under the manifold hypothesis. 
Finally, we (6) give uniform convergence results for local intrinsic dimension estimation.}\n}", "pdf": "http://proceedings.mlr.press/v70/jiang17b/jiang17b.pdf", "supp": "", "pdf_size": 330970, "gs_citation": 114, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15971955451978569325&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 4, "aff": "Google", "aff_domain": "gmail.com", "email": "gmail.com", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v70/jiang17b.html", "aff_unique_index": "0", "aff_unique_norm": "Google", "aff_unique_dep": "Google", "aff_unique_url": "https://www.google.com", "aff_unique_abbr": "Google", "aff_campus_unique_index": "0", "aff_campus_unique": "Mountain View", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "title": "Uniform Deviation Bounds for k-Means Clustering", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/523", "id": "523", "author_site": "Olivier Bachem, Mario Lucic, Hamed Hassani, Andreas Krause", "author": "Olivier Bachem; Mario Lucic; S. Hamed Hassani; Andreas Krause", "abstract": "Uniform deviation bounds limit the difference between a model\u2019s expected loss and its loss on an empirical sample", "bibtex": "@InProceedings{pmlr-v70-bachem17a,\n title = \t {Uniform Deviation Bounds for k-Means Clustering},\n author = {Olivier Bachem and Mario Lucic and S. Hamed Hassani and Andreas Krause},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {283--291},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/bachem17a/bachem17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/bachem17a.html},\n abstract = \t {Uniform deviation bounds limit the difference between a model\u2019s expected loss and its loss on an empirical sample", "pdf": "http://proceedings.mlr.press/v70/bachem17a/bachem17a.pdf", "supp": "", "pdf_size": 568471, "gs_citation": 27, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10553155994481438714&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 8, "aff": "Department of Computer Science, ETH Zurich; Department of Computer Science, ETH Zurich; Department of Computer Science, ETH Zurich; Department of Computer Science, ETH Zurich", "aff_domain": "inf.ethz.ch; ; ; ", "email": "inf.ethz.ch; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/bachem17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "ETH Zurich", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.ethz.ch", "aff_unique_abbr": "ETHZ", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "Switzerland" }, { "title": "Unifying Task Specification in Reinforcement Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/461", "id": "461", "author": "Martha White", "abstract": "Reinforcement learning tasks are typically specified as Markov decision processes. This formalism has been highly successful, though specifications often couple the dynamics of the environment and the learning objective. 
This lack of modularity can complicate generalization of the task specification, as well as obfuscate connections between different task settings, such as episodic and continuing. In this work, we introduce the RL task formalism, that provides a unification through simple constructs including a generalization to transition-based discounting. Through a series of examples, we demonstrate the generality and utility of this formalism. Finally, we extend standard learning constructs, including Bellman operators, and extend some seminal theoretical results, including approximation errors bounds. Overall, we provide a well-understood and sound formalism on which to build theoretical results and simplify algorithm use and development.", "bibtex": "@InProceedings{pmlr-v70-white17a,\n title = \t {Unifying Task Specification in Reinforcement Learning},\n author = {Martha White},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3742--3750},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/white17a/white17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/white17a.html},\n abstract = \t {Reinforcement learning tasks are typically specified as Markov decision processes. This formalism has been highly successful, though specifications often couple the dynamics of the environment and the learning objective. This lack of modularity can complicate generalization of the task specification, as well as obfuscate connections between different task settings, such as episodic and continuing. In this work, we introduce the RL task formalism, that provides a unification through simple constructs including a generalization to transition-based discounting. Through a series of examples, we demonstrate the generality and utility of this formalism. Finally, we extend standard learning constructs, including Bellman operators, and extend some seminal theoretical results, including approximation errors bounds. Overall, we provide a well-understood and sound formalism on which to build theoretical results and simplify algorithm use and development.}\n}", "pdf": "http://proceedings.mlr.press/v70/white17a/white17a.pdf", "supp": "", "pdf_size": 552378, "gs_citation": 122, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2276051739133493175&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Department of Computer Science, Indiana University", "aff_domain": "indiana.edu", "email": "indiana.edu", "github": "", "project": "", "author_num": 1, "oa": "https://proceedings.mlr.press/v70/white17a.html", "aff_unique_index": "0", "aff_unique_norm": "Indiana University", "aff_unique_dep": "Department of Computer Science", "aff_unique_url": "https://www.indiana.edu", "aff_unique_abbr": "IU", "aff_country_unique_index": "0", "aff_country_unique": "United States" }, { "title": "Unimodal Probability Distributions for Deep Ordinal Classification", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/672", "id": "672", "author_site": "Christopher Beckham, Christopher Pal", "author": "Christopher Beckham; Christopher Pal", "abstract": "Probability distributions produced by the cross-entropy loss for ordinal classification problems can possess undesired properties. 
We propose a straightforward technique to constrain discrete ordinal probability distributions to be unimodal via the use of the Poisson and binomial probability distributions. We evaluate this approach in the context of deep learning on two large ordinal image datasets, obtaining promising results.", "bibtex": "@InProceedings{pmlr-v70-beckham17a,\n title = \t {Unimodal Probability Distributions for Deep Ordinal Classification},\n author = {Christopher Beckham and Christopher Pal},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {411--419},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/beckham17a/beckham17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/beckham17a.html},\n abstract = \t {Probability distributions produced by the cross-entropy loss for ordinal classification problems can possess undesired properties. We propose a straightforward technique to constrain discrete ordinal probability distributions to be unimodal via the use of the Poisson and binomial probability distributions. We evaluate this approach in the context of deep learning on two large ordinal image datasets, obtaining promising results.}\n}", "pdf": "http://proceedings.mlr.press/v70/beckham17a/beckham17a.pdf", "supp": "", "pdf_size": 776212, "gs_citation": 103, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11177321391478415551&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 6, "aff": "Montr\u00e9al Institute of Learning Algorithms, Qu\u00e9bec, Canada; Montr\u00e9al Institute of Learning Algorithms, Qu\u00e9bec, Canada", "aff_domain": "polymtl.ca; ", "email": "polymtl.ca; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/beckham17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Montr\u00e9al Institute of Learning Algorithms", "aff_unique_dep": "", "aff_unique_url": "", "aff_unique_abbr": "MILA", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "Canada" }, { "title": "Unsupervised Learning by Predicting Noise", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/520", "id": "520", "author_site": "Piotr Bojanowski, Armand Joulin", "author": "Piotr Bojanowski; Armand Joulin", "abstract": "Convolutional neural networks provide visual features that perform remarkably well in many computer vision applications. However, training these networks requires significant amounts of supervision; this paper introduces a generic framework to train such networks, end-to-end, with no supervision. We propose to fix a set of target representations, called Noise As Targets (NAT), and to constrain the deep features to align to them. This domain agnostic approach avoids the standard unsupervised learning issues of trivial solutions and collapsing of the features. Thanks to a stochastic batch reassignment strategy and a separable square loss function, it scales to millions of images. 
The proposed approach produces representations that perform on par with the state-of-the-arts among unsupervised methods on ImageNet and Pascal VOC.", "bibtex": "@InProceedings{pmlr-v70-bojanowski17a,\n title = \t {Unsupervised Learning by Predicting Noise},\n author = {Piotr Bojanowski and Armand Joulin},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {517--526},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/bojanowski17a/bojanowski17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/bojanowski17a.html},\n abstract = \t {Convolutional neural networks provide visual features that perform remarkably well in many computer vision applications. However, training these networks requires significant amounts of supervision; this paper introduces a generic framework to train such networks, end-to-end, with no supervision. We propose to fix a set of target representations, called Noise As Targets (NAT), and to constrain the deep features to align to them. This domain agnostic approach avoids the standard unsupervised learning issues of trivial solutions and collapsing of the features. Thanks to a stochastic batch reassignment strategy and a separable square loss function, it scales to millions of images. The proposed approach produces representations that perform on par with the state-of-the-arts among unsupervised methods on ImageNet and Pascal VOC.}\n}", "pdf": "http://proceedings.mlr.press/v70/bojanowski17a/bojanowski17a.pdf", "supp": "", "pdf_size": 2290693, "gs_citation": 350, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11409442815137893226&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Facebook AI Research; Facebook AI Research", "aff_domain": "fb.com; ", "email": "fb.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/bojanowski17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Meta", "aff_unique_dep": "Facebook AI Research", "aff_unique_url": "https://research.facebook.com", "aff_unique_abbr": "FAIR", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Variants of RMSProp and Adagrad with Logarithmic Regret Bounds", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/884", "id": "884", "author_site": "Mahesh Chandra Mukkamala, Matthias Hein", "author": "Mahesh Chandra Mukkamala; Matthias Hein", "abstract": "Adaptive gradient methods have become recently very popular, in particular as they have been shown to be useful in the training of deep neural networks. In this paper we have analyzed RMSProp, originally proposed for the training of deep neural networks, in the context of online convex optimization and show $\\sqrt{T}$-type regret bounds. Moreover, we propose two variants SC-Adagrad and SC-RMSProp for which we show logarithmic regret bounds for strongly convex functions. 
Finally, we demonstrate in the experiments that these new variants outperform other adaptive gradient techniques or stochastic gradient descent in the optimization of strongly convex functions as well as in training of deep neural networks.", "bibtex": "@InProceedings{pmlr-v70-mukkamala17a,\n title = \t {Variants of {RMSP}rop and {A}dagrad with Logarithmic Regret Bounds},\n author = {Mahesh Chandra Mukkamala and Matthias Hein},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2545--2553},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/mukkamala17a/mukkamala17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/mukkamala17a.html},\n abstract = \t {Adaptive gradient methods have become recently very popular, in particular as they have been shown to be useful in the training of deep neural networks. In this paper we have analyzed RMSProp, originally proposed for the training of deep neural networks, in the context of online convex optimization and show $\\sqrt{T}$-type regret bounds. Moreover, we propose two variants SC-Adagrad and SC-RMSProp for which we show logarithmic regret bounds for strongly convex functions. Finally, we demonstrate in the experiments that these new variants outperform other adaptive gradient techniques or stochastic gradient descent in the optimization of strongly convex functions as well as in training of deep neural networks.}\n}", "pdf": "http://proceedings.mlr.press/v70/mukkamala17a/mukkamala17a.pdf", "supp": "", "pdf_size": 4543761, "gs_citation": 377, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17632558278643215712&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 12, "aff": "Department of Mathematics and Computer Science, Saarland University, Germany+IMPRS-CS, Max Planck Institute for Informatics, Saarbr\u00fccken, Germany; Department of Mathematics and Computer Science, Saarland University, Germany", "aff_domain": "gmail.com; ", "email": "gmail.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/mukkamala17a.html", "aff_unique_index": "0+1;0", "aff_unique_norm": "Saarland University;Max Planck Institute for Informatics", "aff_unique_dep": "Department of Mathematics and Computer Science;IMPRS-CS", "aff_unique_url": "https://www.uni-saarland.de;https://mpi-sws.org", "aff_unique_abbr": "Saarland U;MPII", "aff_campus_unique_index": "1", "aff_campus_unique": ";Saarbr\u00fccken", "aff_country_unique_index": "0+0;0", "aff_country_unique": "Germany" }, { "title": "Variational Boosting: Iteratively Refining Posterior Approximations", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/558", "id": "558", "author_site": "Andrew Miller, Nicholas J Foti, Ryan P. Adams", "author": "Andrew C. Miller; Nicholas J. Foti; Ryan P. Adams", "abstract": "We propose a black-box variational inference method to approximate intractable distributions with an increasingly rich approximating class. Our method, variational boosting, iteratively refines an existing variational approximation by solving a sequence of optimization problems, allowing a trade-off between computation time and accuracy. We expand the variational approximating class by incorporating additional covariance structure and by introducing new components to form a mixture. 
We apply variational boosting to synthetic and real statistical models, and show that the resulting posterior inferences compare favorably to existing variational algorithms.", "bibtex": "@InProceedings{pmlr-v70-miller17a,\n title = \t {Variational Boosting: Iteratively Refining Posterior Approximations},\n author = {Andrew C. Miller and Nicholas J. Foti and Ryan P. Adams},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2420--2429},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/miller17a/miller17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/miller17a.html},\n abstract = \t {We propose a black-box variational inference method to approximate intractable distributions with an increasingly rich approximating class. Our method, variational boosting, iteratively refines an existing variational approximation by solving a sequence of optimization problems, allowing a trade-off between computation time and accuracy. We expand the variational approximating class by incorporating additional covariance structure and by introducing new components to form a mixture. We apply variational boosting to synthetic and real statistical models, and show that the resulting posterior inferences compare favorably to existing variational algorithms.}\n}", "pdf": "http://proceedings.mlr.press/v70/miller17a/miller17a.pdf", "supp": "", "pdf_size": 3925400, "gs_citation": -1, "gs_cited_by_link": "", "gs_version_total": -1, "aff": "Harvard University, Cambridge, MA, USA; University of Washington, Seattle, WA, USA; Google Brain, Cambridge, MA, USA", "aff_domain": "seas.harvard.edu;uw.edu; ", "email": "seas.harvard.edu;uw.edu; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/miller17a.html", "aff_unique_index": "0;1;2", "aff_unique_norm": "Harvard University;University of Washington;Google", "aff_unique_dep": ";;Google Brain", "aff_unique_url": "https://www.harvard.edu;https://www.washington.edu;https://brain.google.com", "aff_unique_abbr": "Harvard;UW;Google Brain", "aff_campus_unique_index": "0;1;0", "aff_campus_unique": "Cambridge;Seattle", "aff_country_unique_index": "0;0;0", "aff_country_unique": "United States" }, { "title": "Variational Dropout Sparsifies Deep Neural Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/793", "id": "793", "author_site": "Dmitry Molchanov, Arsenii Ashukha, Dmitry Vetrov", "author": "Dmitry Molchanov; Arsenii Ashukha; Dmitry Vetrov", "abstract": "We explore a recently proposed Variational Dropout technique that provided an elegant Bayesian interpretation to Gaussian Dropout. We extend Variational Dropout to the case when dropout rates are unbounded, propose a way to reduce the variance of the gradient estimator and report first experimental results with individual dropout rates per weight. Interestingly, it leads to extremely sparse solutions both in fully-connected and convolutional layers. This effect is similar to automatic relevance determination effect in empirical Bayes but has a number of advantages. 
We reduce the number of parameters up to 280 times on LeNet architectures and up to 68 times on VGG-like networks with a negligible decrease of accuracy.", "bibtex": "@InProceedings{pmlr-v70-molchanov17a,\n title = \t {Variational Dropout Sparsifies Deep Neural Networks},\n author = {Dmitry Molchanov and Arsenii Ashukha and Dmitry Vetrov},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2498--2507},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/molchanov17a/molchanov17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/molchanov17a.html},\n abstract = \t {We explore a recently proposed Variational Dropout technique that provided an elegant Bayesian interpretation to Gaussian Dropout. We extend Variational Dropout to the case when dropout rates are unbounded, propose a way to reduce the variance of the gradient estimator and report first experimental results with individual dropout rates per weight. Interestingly, it leads to extremely sparse solutions both in fully-connected and convolutional layers. This effect is similar to automatic relevance determination effect in empirical Bayes but has a number of advantages. We reduce the number of parameters up to 280 times on LeNet architectures and up to 68 times on VGG-like networks with a negligible decrease of accuracy.}\n}", "pdf": "http://proceedings.mlr.press/v70/molchanov17a/molchanov17a.pdf", "supp": "", "pdf_size": 884873, "gs_citation": 1146, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11014728550012194230&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 10, "aff": "Yandex, Russia+Skolkovo Institute of Science and Technology, Skolkovo Innovation Center, Moscow, Russia; National Research University Higher School of Economics, Moscow, Russia+Moscow Institute of Physics and Technology, Moscow, Russia; Yandex, Russia+National Research University Higher School of Economics, Moscow, Russia", "aff_domain": "skolkovotech.ru;gmail.com;yandex.ru", "email": "skolkovotech.ru;gmail.com;yandex.ru", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/molchanov17a.html", "aff_unique_index": "0+1;2+3;0+2", "aff_unique_norm": "Yandex;Skolkovo Institute of Science and Technology;National Research University Higher School of Economics;Moscow Institute of Physics and Technology", "aff_unique_dep": ";;;", "aff_unique_url": "https://yandex.com;https://www.skoltech.ru;https://www.hse.ru;https://www.mipt.ru/en", "aff_unique_abbr": "Yandex;Skoltech;HSE;MIPT", "aff_campus_unique_index": "1;2+2;2", "aff_campus_unique": ";Skolkovo;Moscow", "aff_country_unique_index": "0+0;0+0;0+0", "aff_country_unique": "Russian Federation" }, { "title": "Variational Inference for Sparse and Undirected Models", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/861", "id": "861", "author_site": "John Ingraham, Debora Marks", "author": "John Ingraham; Debora Marks", "abstract": "Undirected graphical models are applied in genomics, protein structure prediction, and neuroscience to identify sparse interactions that underlie discrete data. Although Bayesian methods for inference would be favorable in these contexts, they are rarely used because they require doubly intractable Monte Carlo sampling. 
Here, we develop a framework for scalable Bayesian inference of discrete undirected models based on two new methods. The first is Persistent VI, an algorithm for variational inference of discrete undirected models that avoids doubly intractable MCMC and approximations of the partition function. The second is Fadeout, a reparameterization approach for variational inference under sparsity-inducing priors that captures a posteriori correlations between parameters and hyperparameters with noncentered parameterizations. We find that, together, these methods for variational inference substantially improve learning of sparse undirected graphical models in simulated and real problems from physics and biology.", "bibtex": "@InProceedings{pmlr-v70-ingraham17a,\n title = \t {Variational Inference for Sparse and Undirected Models},\n author = {John Ingraham and Debora Marks},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1607--1616},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/ingraham17a/ingraham17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/ingraham17a.html},\n abstract = \t {Undirected graphical models are applied in genomics, protein structure prediction, and neuroscience to identify sparse interactions that underlie discrete data. Although Bayesian methods for inference would be favorable in these contexts, they are rarely used because they require doubly intractable Monte Carlo sampling. Here, we develop a framework for scalable Bayesian inference of discrete undirected models based on two new methods. The first is Persistent VI, an algorithm for variational inference of discrete undirected models that avoids doubly intractable MCMC and approximations of the partition function. The second is Fadeout, a reparameterization approach for variational inference under sparsity-inducing priors that captures a posteriori correlations between parameters and hyperparameters with noncentered parameterizations. 
We find that, together, these methods for variational inference substantially improve learning of sparse undirected graphical models in simulated and real problems from physics and biology.}\n}", "pdf": "http://proceedings.mlr.press/v70/ingraham17a/ingraham17a.pdf", "supp": "", "pdf_size": 4250631, "gs_citation": 33, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6097763724447080489&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "Harvard Medical School; Harvard Medical School", "aff_domain": "fas.harvard.edu;hms.harvard.edu", "email": "fas.harvard.edu;hms.harvard.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/ingraham17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "Harvard University", "aff_unique_dep": "Medical School", "aff_unique_url": "https://hms.harvard.edu", "aff_unique_abbr": "HMS", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Boston", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Variational Policy for Guiding Point Processes", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/798", "id": "798", "author_site": "Yichen Wang, Grady Williams, Evangelos Theodorou, Le Song", "author": "Yichen Wang; Grady Williams; Evangelos Theodorou; Le Song", "abstract": "Temporal point processes have been widely applied to model event sequence data generated by online users. In this paper, we consider the problem of how to design the optimal control policy for point processes, such that the stochastic system driven by the point process is steered to a target state. In particular, we exploit the key insight to view the stochastic optimal control problem from the perspective of optimal measure and variational inference. We further propose a convex optimization framework and an efficient algorithm to update the policy adaptively to the current system state. Experiments on synthetic and real-world data show that our algorithm can steer the user activities much more accurately and efficiently than other stochastic control methods.", "bibtex": "@InProceedings{pmlr-v70-wang17k,\n title = \t {Variational Policy for Guiding Point Processes},\n author = {Yichen Wang and Grady Williams and Evangelos Theodorou and Le Song},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3684--3693},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/wang17k/wang17k.pdf},\n url = \t {https://proceedings.mlr.press/v70/wang17k.html},\n abstract = \t {Temporal point processes have been widely applied to model event sequence data generated by online users. In this paper, we consider the problem of how to design the optimal control policy for point processes, such that the stochastic system driven by the point process is steered to a target state. In particular, we exploit the key insight to view the stochastic optimal control problem from the perspective of optimal measure and variational inference. We further propose a convex optimization framework and an efficient algorithm to update the policy adaptively to the current system state. 
Experiments on synthetic and real-world data show that our algorithm can steer the user activities much more accurately and efficiently than other stochastic control methods.}\n}", "pdf": "http://proceedings.mlr.press/v70/wang17k/wang17k.pdf", "supp": "", "pdf_size": 1065051, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14971200761039230720&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 5, "aff": "College of Computing, Georgia Tech, Atlanta, GA, USA; School of Aerospace Engineering, Georgia Tech; School of Aerospace Engineering, Georgia Tech; College of Computing, Georgia Tech, Atlanta, GA, USA", "aff_domain": "gatech.edu; ; ; ", "email": "gatech.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/wang17k.html", "aff_unique_index": "0;1;1;0", "aff_unique_norm": "Georgia Institute of Technology;Georgia Tech", "aff_unique_dep": "College of Computing;School of Aerospace Engineering", "aff_unique_url": "https://www.gatech.edu;https://www.gatech.edu", "aff_unique_abbr": "Georgia Tech;GT", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Atlanta", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" }, { "title": "Video Pixel Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/720", "id": "720", "author_site": "Nal Kalchbrenner, Karen Simonyan, A\u00e4ron van den Oord, Ivo Danihelka, Oriol Vinyals, Alex Graves, Koray Kavukcuoglu", "author": "Nal Kalchbrenner; A\u00e4ron Oord; Karen Simonyan; Ivo Danihelka; Oriol Vinyals; Alex Graves; Koray Kavukcuoglu", "abstract": "We propose a probabilistic video model, the Video Pixel Network (VPN), that estimates the discrete joint distribution of the raw pixel values in a video. The model and the neural architecture reflect the time, space and color structure of video tensors and encode it as a four-dimensional dependency chain. The VPN approaches the best possible performance on the Moving MNIST benchmark, a leap over the previous state of the art, and the generated videos show only minor deviations from the ground truth. The VPN also produces detailed samples on the action-conditional Robotic Pushing benchmark and generalizes to the motion of novel objects.", "bibtex": "@InProceedings{pmlr-v70-kalchbrenner17a,\n title = \t {Video Pixel Networks},\n author = {Nal Kalchbrenner and A{\\\"a}ron van den Oord and Karen Simonyan and Ivo Danihelka and Oriol Vinyals and Alex Graves and Koray Kavukcuoglu},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1771--1779},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/kalchbrenner17a/kalchbrenner17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/kalchbrenner17a.html},\n abstract = \t {We propose a probabilistic video model, the Video Pixel Network (VPN), that estimates the discrete joint distribution of the raw pixel values in a video. The model and the neural architecture reflect the time, space and color structure of video tensors and encode it as a four-dimensional dependency chain. The VPN approaches the best possible performance on the Moving MNIST benchmark, a leap over the previous state of the art, and the generated videos show only minor deviations from the ground truth. 
The VPN also produces detailed samples on the action-conditional Robotic Pushing benchmark and generalizes to the motion of novel objects.}\n}", "pdf": "http://proceedings.mlr.press/v70/kalchbrenner17a/kalchbrenner17a.pdf", "supp": "", "pdf_size": 4218040, "gs_citation": 528, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9254314516710917445&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Google DeepMind, London, UK; Google DeepMind, London, UK; Google DeepMind, London, UK; Google DeepMind, London, UK; Google DeepMind, London, UK; Google DeepMind, London, UK; Google DeepMind, London, UK", "aff_domain": "google.com; ; ; ; ; ; ", "email": "google.com; ; ; ; ; ; ", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v70/kalchbrenner17a.html", "aff_unique_index": "0;0;0;0;0;0;0", "aff_unique_norm": "Google", "aff_unique_dep": "Google DeepMind", "aff_unique_url": "https://deepmind.com", "aff_unique_abbr": "DeepMind", "aff_campus_unique_index": "0;0;0;0;0;0;0", "aff_campus_unique": "London", "aff_country_unique_index": "0;0;0;0;0;0;0", "aff_country_unique": "United Kingdom" }, { "title": "Visualizing and Understanding Multilayer Perceptron Models: A Case Study in Speech Processing", "author": "Tasha Nagamine, Nima Mesgarani", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/826", "id": "826" }, { "title": "Warped Convolutions: Efficient Invariance to Spatial Transformations", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/660", "id": "660", "author_site": "Joao Henriques, Andrea Vedaldi", "author": "Jo\u00e3o F. Henriques; Andrea Vedaldi", "abstract": "Convolutional Neural Networks (CNNs) are extremely efficient, since they exploit the inherent translation-invariance of natural images. However, translation is just one of a myriad of useful spatial transformations. Can the same efficiency be attained when considering other spatial invariances? Such generalized convolutions have been considered in the past, but at a high computational cost. We present a construction that is simple and exact, yet has the same computational complexity that standard convolutions enjoy. It consists of a constant image warp followed by a simple convolution, which are standard blocks in deep learning toolboxes. With a carefully crafted warp, the resulting architecture can be made equivariant to a wide range of two-parameter spatial transformations. We show encouraging results in realistic scenarios, including the estimation of vehicle poses in the Google Earth dataset (rotation and scale), and face poses in Annotated Facial Landmarks in the Wild (3D rotations under perspective).", "bibtex": "@InProceedings{pmlr-v70-henriques17a,\n title = \t {Warped Convolutions: Efficient Invariance to Spatial Transformations},\n author = {Jo{\\~a}o F. Henriques and Andrea Vedaldi},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1461--1469},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/henriques17a/henriques17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/henriques17a.html},\n abstract = \t {Convolutional Neural Networks (CNNs) are extremely efficient, since they exploit the inherent translation-invariance of natural images. 
However, translation is just one of a myriad of useful spatial transformations. Can the same efficiency be attained when considering other spatial invariances? Such generalized convolutions have been considered in the past, but at a high computational cost. We present a construction that is simple and exact, yet has the same computational complexity that standard convolutions enjoy. It consists of a constant image warp followed by a simple convolution, which are standard blocks in deep learning toolboxes. With a carefully crafted warp, the resulting architecture can be made equivariant to a wide range of two-parameter spatial transformations. We show encouraging results in realistic scenarios, including the estimation of vehicle poses in the Google Earth dataset (rotation and scale), and face poses in Annotated Facial Landmarks in the Wild (3D rotations under perspective).}\n}", "pdf": "http://proceedings.mlr.press/v70/henriques17a/henriques17a.pdf", "supp": "", "pdf_size": 799543, "gs_citation": 128, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10896480336756687375&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 11, "aff": "Visual Geometry Group, University of Oxford; Visual Geometry Group, University of Oxford", "aff_domain": "robots.ox.ac.uk; ", "email": "robots.ox.ac.uk; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/henriques17a.html", "aff_unique_index": "0;0", "aff_unique_norm": "University of Oxford", "aff_unique_dep": "Visual Geometry Group", "aff_unique_url": "https://www.ox.ac.uk", "aff_unique_abbr": "Oxford", "aff_campus_unique_index": "0;0", "aff_campus_unique": "Oxford", "aff_country_unique_index": "0;0", "aff_country_unique": "United Kingdom" }, { "title": "Wasserstein Generative Adversarial Networks", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/799", "id": "799", "author_site": "Martin Arjovsky, Soumith Chintala, L\u00e9on Bottou", "author": "Martin Arjovsky; Soumith Chintala; L\u00e9on Bottou", "abstract": "We introduce a new algorithm named WGAN, an alternative to traditional GAN training. In this new model, we show that we can improve the stability of learning, get rid of problems like mode collapse, and provide meaningful learning curves useful for debugging and hyperparameter searches. Furthermore, we show that the corresponding optimization problem is sound, and provide extensive theoretical work highlighting the deep connections to different distances between distributions.", "bibtex": "@InProceedings{pmlr-v70-arjovsky17a,\n title = \t {{W}asserstein Generative Adversarial Networks},\n author = {Martin Arjovsky and Soumith Chintala and L{\\'e}on Bottou},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {214--223},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/arjovsky17a/arjovsky17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/arjovsky17a.html},\n abstract = \t {We introduce a new algorithm named WGAN, an alternative to traditional GAN training. In this new model, we show that we can improve the stability of learning, get rid of problems like mode collapse, and provide meaningful learning curves useful for debugging and hyperparameter searches. 
Furthermore, we show that the corresponding optimization problem is sound, and provide extensive theoretical work highlighting the deep connections to different distances between distributions.}\n}", "pdf": "http://proceedings.mlr.press/v70/arjovsky17a/arjovsky17a.pdf", "supp": "", "pdf_size": 3374350, "gs_citation": 19098, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11268888003834576266&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "Courant Institute of Mathematical Sciences, NY; Facebook AI Research, NY; Courant Institute of Mathematical Sciences, NY + Facebook AI Research, NY", "aff_domain": "gmail.com; ; ", "email": "gmail.com; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/arjovsky17a.html", "aff_unique_index": "0;1;0+1", "aff_unique_norm": "Courant Institute of Mathematical Sciences;Meta", "aff_unique_dep": "Mathematical Sciences;Facebook AI Research", "aff_unique_url": "https://courant.nyu.edu;https://research.facebook.com", "aff_unique_abbr": "Courant;FAIR", "aff_campus_unique_index": "0;0;0+0", "aff_campus_unique": "New York", "aff_country_unique_index": "0;0;0+0", "aff_country_unique": "United States" }, { "title": "When can Multi-Site Datasets be Pooled for Regression? Hypothesis Tests, $\\ell_2$-consistency and Neuroscience Applications", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/487", "id": "487", "author_site": "Hao Zhou, Yilin Zhang, Vamsi Krishna Ithapu, Sterling Johnson, Grace Wahba, Vikas Singh", "author": "Hao Henry Zhou; Yilin Zhang; Vamsi K. Ithapu; Sterling C. Johnson; Grace Wahba; Vikas Singh", "abstract": "Many studies in biomedical and health sciences involve small sample sizes due to logistic or financial constraints. Often, identifying weak (but scientifically interesting) associations between a set of predictors and a response necessitates pooling datasets from multiple diverse labs or groups. While there is a rich literature in statistical machine learning to address distributional shifts and inference in multi-site datasets, it is less clear when such pooling is guaranteed to help (and when it does not) \u2013 independent of the inference algorithms we use. In this paper, we present a hypothesis test to answer this question, both for classical and high dimensional linear regression. We precisely identify regimes where pooling datasets across multiple sites is sensible, and how such policy decisions can be made via simple checks executable on each site before any data transfer ever happens. With a focus on Alzheimer\u2019s disease studies, we present empirical results showing that in regimes suggested by our analysis, pooling a local dataset with data from an international study improves power.", "bibtex": "@InProceedings{pmlr-v70-zhou17c,\n title = \t {When can Multi-Site Datasets be Pooled for Regression? {H}ypothesis Tests, $\\ell_2$-consistency and Neuroscience Applications},\n author = {Hao Henry Zhou and Yilin Zhang and Vamsi K. Ithapu and Sterling C. 
Johnson and Grace Wahba and Vikas Singh},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {4170--4179},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zhou17c/zhou17c.pdf},\n url = \t {https://proceedings.mlr.press/v70/zhou17c.html},\n abstract = \t {Many studies in biomedical and health sciences involve small sample sizes due to logistic or financial constraints. Often, identifying weak (but scientifically interesting) associations between a set of predictors and a response necessitates pooling datasets from multiple diverse labs or groups. While there is a rich literature in statistical machine learning to address distributional shifts and inference in multi-site datasets, it is less clear when such pooling is guaranteed to help (and when it does not) \u2013 independent of the inference algorithms we use. In this paper, we present a hypothesis test to answer this question, both for classical and high dimensional linear regression. We precisely identify regimes where pooling datasets across multiple sites is sensible, and how such policy decisions can be made via simple checks executable on each site before any data transfer ever happens. With a focus on Alzheimer\u2019s disease studies, we present empirical results showing that in regimes suggested by our analysis, pooling a local dataset with data from an international study improves power.}\n}", "pdf": "http://proceedings.mlr.press/v70/zhou17c/zhou17c.pdf", "supp": "", "pdf_size": 384918, "gs_citation": 18, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4992094152465045354&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 14, "aff": "University of Wisconsin-Madison; University of Wisconsin-Madison; University of Wisconsin-Madison; University of Wisconsin-Madison+William S. Middleton Memorial Veteran\u2019s Affairs Hospital; University of Wisconsin-Madison; University of Wisconsin-Madison", "aff_domain": "stat.wisc.edu; ; ; ; ;biostat.wisc.edu", "email": "stat.wisc.edu; ; ; ; ;biostat.wisc.edu", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/zhou17c.html", "aff_unique_index": "0;0;0;0+1;0;0", "aff_unique_norm": "University of Wisconsin-Madison;William S. Middleton Memorial Veteran's Affairs Medical Center", "aff_unique_dep": ";", "aff_unique_url": "https://www.wisc.edu;https://www.wisconsin.gov/Pages/HealthCare/WisconsinVAFacilities/WilliamSMiddletonMemorialVAMC.aspx", "aff_unique_abbr": "UW-Madison;", "aff_campus_unique_index": "0;0;0;0;0;0", "aff_campus_unique": "Madison;", "aff_country_unique_index": "0;0;0;0+0;0;0", "aff_country_unique": "United States" }, { "title": "Why is Posterior Sampling Better than Optimism for Reinforcement Learning?", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/589", "id": "589", "author_site": "Ian Osband, Benjamin Van Roy", "author": "Ian Osband; Benjamin Van Roy", "abstract": "Computational results demonstrate that posterior sampling for reinforcement learning (PSRL) dramatically outperforms existing algorithms driven by optimism, such as UCRL2. We provide insight into the extent of this performance boost and the phenomenon that drives it. 
We leverage this insight to establish an $\\tilde{O}(H\\sqrt{SAT})$ Bayesian regret bound for PSRL in finite-horizon episodic Markov decision processes. This improves upon the best previous Bayesian regret bound of $\\tilde{O}(H S \\sqrt{AT})$ for any reinforcement learning algorithm. Our theoretical results are supported by extensive empirical evaluation.", "bibtex": "@InProceedings{pmlr-v70-osband17a,\n title = \t {Why is Posterior Sampling Better than Optimism for Reinforcement Learning?},\n author = {Ian Osband and Van Roy, Benjamin},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2701--2710},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/osband17a/osband17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/osband17a.html},\n abstract = \t {Computational results demonstrate that posterior sampling for reinforcement learning (PSRL) dramatically outperforms existing algorithms driven by optimism, such as UCRL2. We provide insight into the extent of this performance boost and the phenomenon that drives it. We leverage this insight to establish an $\\tilde{O}(H\\sqrt{SAT})$ Bayesian regret bound for PSRL in finite-horizon episodic Markov decision processes. This improves upon the best previous Bayesian regret bound of $\\tilde{O}(H S \\sqrt{AT})$ for any reinforcement learning algorithm. Our theoretical results are supported by extensive empirical evaluation.}\n}", "pdf": "http://proceedings.mlr.press/v70/osband17a/osband17a.pdf", "supp": "", "pdf_size": 2778546, "gs_citation": 295, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15289447082157891829&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 9, "aff": "Stanford University, California, USA+Deepmind, London, UK; Stanford University, California, USA", "aff_domain": "gmail.com; ", "email": "gmail.com; ", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/osband17a.html", "aff_unique_index": "0+1;0", "aff_unique_norm": "Stanford University;DeepMind", "aff_unique_dep": ";", "aff_unique_url": "https://www.stanford.edu;https://deepmind.com", "aff_unique_abbr": "Stanford;DeepMind", "aff_campus_unique_index": "0+1;0", "aff_campus_unique": "California;London", "aff_country_unique_index": "0+1;0", "aff_country_unique": "United States;United Kingdom" }, { "title": "World of Bits: An Open-Domain Platform for Web-Based Agents", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/843", "id": "843", "author_site": "Tim Shi, Andrej Karpathy, Jim Fan, Jonathan Hernandez, Percy Liang", "author": "Tianlin Shi; Andrej Karpathy; Linxi Fan; Jonathan Hernandez; Percy Liang", "abstract": "While simulated game environments have greatly accelerated research in reinforcement learning, existing environments lack the open-domain realism of tasks in computer vision or natural language processing, which operate on artifacts created by humans in natural, organic settings. To foster reinforcement learning research in such settings, we introduce the World of Bits (WoB), a platform in which agents complete tasks on the Internet by performing low-level keyboard and mouse actions. 
The two main challenges are: (i) to curate a large, diverse set of interesting web-based tasks, and (ii) to ensure that these tasks have a well-defined reward structure and are reproducible despite the transience of the web. To do this, we develop a methodology in which crowdworkers create tasks defined by natural language questions and provide demonstrations of how to answer the question on real websites using keyboard and mouse; HTTP traffic is cached to create a reproducible offline approximation of the web site. Finally, we show that agents trained via behavioral cloning and reinforcement learning can successfully complete a range of our web-based tasks.", "bibtex": "@InProceedings{pmlr-v70-shi17a,\n title = \t {World of Bits: An Open-Domain Platform for Web-Based Agents},\n author = {Tianlin Shi and Andrej Karpathy and Linxi Fan and Jonathan Hernandez and Percy Liang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3135--3144},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/shi17a/shi17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/shi17a.html},\n abstract = \t {While simulated game environments have greatly accelerated research in reinforcement learning, existing environments lack the open-domain realism of tasks in computer vision or natural language processing, which operate on artifacts created by humans in natural, organic settings. To foster reinforcement learning research in such settings, we introduce the World of Bits (WoB), a platform in which agents complete tasks on the Internet by performing low-level keyboard and mouse actions. The two main challenges are: (i) to curate a large, diverse set of interesting web-based tasks, and (ii) to ensure that these tasks have a well-defined reward structure and are reproducible despite the transience of the web. To do this, we develop a methodology in which crowdworkers create tasks defined by natural language questions and provide demonstrations of how to answer the question on real websites using keyboard and mouse; HTTP traffic is cached to create a reproducible offline approximation of the web site. 
Finally, we show that agents trained via behavioral cloning and reinforcement learning can successfully complete a range of our web-based tasks.}\n}", "pdf": "http://proceedings.mlr.press/v70/shi17a/shi17a.pdf", "supp": "", "pdf_size": 3383879, "gs_citation": 244, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6749331714869626365&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 4, "aff": "Stanford University, Stanford, USA+OpenAI, San Francisco, USA; OpenAI, San Francisco, USA; Stanford University, Stanford, USA; OpenAI, San Francisco, USA; Stanford University, Stanford, USA", "aff_domain": "cs.stanford.edu; ; ; ; ", "email": "cs.stanford.edu; ; ; ; ", "github": "", "project": "https://goo.gl/JdLQGT", "author_num": 5, "oa": "https://proceedings.mlr.press/v70/shi17a.html", "aff_unique_index": "0+1;1;0;1;0", "aff_unique_norm": "Stanford University;OpenAI", "aff_unique_dep": ";", "aff_unique_url": "https://www.stanford.edu;https://openai.com", "aff_unique_abbr": "Stanford;OpenAI", "aff_campus_unique_index": "0+1;1;0;1;0", "aff_campus_unique": "Stanford;San Francisco", "aff_country_unique_index": "0+0;0;0;0;0", "aff_country_unique": "United States" }, { "title": "Zero-Inflated Exponential Family Embeddings", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/625", "id": "625", "author_site": "Liping Liu, David Blei", "author": "Li-Ping Liu; David M. Blei", "abstract": "Word embeddings are a widely-used tool to analyze language, and exponential family embeddings (Rudolph et al., 2016) generalize the technique to other types of data. One challenge to fitting embedding methods is sparse data, such as a document/term matrix that contains many zeros. To address this issue, practitioners typically downweight or subsample the zeros, thus focusing learning on the non-zero entries. In this paper, we develop zero-inflated embeddings, a new embedding method that is designed to learn from sparse observations. In a zero-inflated embedding (ZIE), a zero in the data can come from an interaction to other data (i.e., an embedding) or from a separate process by which many observations are equal to zero (i.e. a probability mass at zero). Fitting a ZIE naturally downweights the zeros and dampens their influence on the model. Across many types of data\u2014language, movie ratings, shopping histories, and bird watching logs\u2014we found that zero-inflated embeddings provide improved predictive performance over standard approaches and find better vector representation of items.", "bibtex": "@InProceedings{pmlr-v70-liu17a,\n title = \t {Zero-Inflated Exponential Family Embeddings},\n author = {Li-Ping Liu and David M. Blei},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2140--2148},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/liu17a/liu17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/liu17a.html},\n abstract = \t {Word embeddings are a widely-used tool to analyze language, and exponential family embeddings (Rudolph et al., 2016) generalize the technique to other types of data. One challenge to fitting embedding methods is sparse data, such as a document/term matrix that contains many zeros. 
To address this issue, practitioners typically downweight or subsample the zeros, thus focusing learning on the non-zero entries. In this paper, we develop zero-inflated embeddings, a new embedding method that is designed to learn from sparse observations. In a zero-inflated embedding (ZIE), a zero in the data can come from an interaction to other data (i.e., an embedding) or from a separate process by which many observations are equal to zero (i.e. a probability mass at zero). Fitting a ZIE naturally downweights the zeros and dampens their influence on the model. Across many types of data\u2014language, movie ratings, shopping histories, and bird watching logs\u2014we found that zero-inflated embeddings provide improved predictive performance over standard approaches and find better vector representation of items.}\n}", "pdf": "http://proceedings.mlr.press/v70/liu17a/liu17a.pdf", "supp": "", "pdf_size": 442220, "gs_citation": 12, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=242898130250517229&as_sdt=400005&sciodt=0,14&hl=en", "gs_version_total": 6, "aff": "Columbia University; Tufts University", "aff_domain": "columbia.edu;columbia.edu", "email": "columbia.edu;columbia.edu", "github": "", "project": "", "author_num": 2, "oa": "https://proceedings.mlr.press/v70/liu17a.html", "aff_unique_index": "0;1", "aff_unique_norm": "Columbia University;Tufts University", "aff_unique_dep": ";", "aff_unique_url": "https://www.columbia.edu;https://www.tufts.edu", "aff_unique_abbr": "Columbia;Tufts", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;0", "aff_country_unique": "United States" }, { "title": "Zero-Shot Task Generalization with Multi-Task Deep Reinforcement Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/822", "id": "822", "author_site": "Junhyuk Oh, Satinder Singh, Honglak Lee, Pushmeet Kohli", "author": "Junhyuk Oh; Satinder Singh; Honglak Lee; Pushmeet Kohli", "abstract": "As a step towards developing zero-shot task generalization capabilities in reinforcement learning (RL), we introduce a new RL problem where the agent should learn to execute sequences of instructions after learning useful skills that solve subtasks. In this problem, we consider two types of generalizations: to previously unseen instructions and to longer sequences of instructions. For generalization over unseen instructions, we propose a new objective which encourages learning correspondences between similar subtasks by making analogies. For generalization over sequential instructions, we present a hierarchical architecture where a meta controller learns to use the acquired skills for executing the instructions. To deal with delayed reward, we propose a new neural architecture in the meta controller that learns when to update the subtask, which makes learning more efficient. 
Experimental results on a stochastic 3D domain show that the proposed ideas are crucial for generalization to longer instructions as well as unseen instructions.", "bibtex": "@InProceedings{pmlr-v70-oh17a,\n title = \t {Zero-Shot Task Generalization with Multi-Task Deep Reinforcement Learning},\n author = {Junhyuk Oh and Satinder Singh and Honglak Lee and Pushmeet Kohli},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {2661--2670},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/oh17a/oh17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/oh17a.html},\n abstract = \t {As a step towards developing zero-shot task generalization capabilities in reinforcement learning (RL), we introduce a new RL problem where the agent should learn to execute sequences of instructions after learning useful skills that solve subtasks. In this problem, we consider two types of generalizations: to previously unseen instructions and to longer sequences of instructions. For generalization over unseen instructions, we propose a new objective which encourages learning correspondences between similar subtasks by making analogies. For generalization over sequential instructions, we present a hierarchical architecture where a meta controller learns to use the acquired skills for executing the instructions. To deal with delayed reward, we propose a new neural architecture in the meta controller that learns when to update the subtask, which makes learning more efficient. Experimental results on a stochastic 3D domain show that the proposed ideas are crucial for generalization to longer instructions as well as unseen instructions.}\n}", "pdf": "http://proceedings.mlr.press/v70/oh17a/oh17a.pdf", "supp": "", "pdf_size": 1438935, "gs_citation": 333, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9061930925606461118&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 8, "aff": "University of Michigan; University of Michigan; University of Michigan + Google Brain; Microsoft Research", "aff_domain": "umich.edu; ; ; ", "email": "umich.edu; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/oh17a.html", "aff_unique_index": "0;0;0+1;2", "aff_unique_norm": "University of Michigan;Google;Microsoft", "aff_unique_dep": ";Google Brain;Microsoft Research", "aff_unique_url": "https://www.umich.edu;https://brain.google.com;https://www.microsoft.com/en-us/research", "aff_unique_abbr": "UM;Google Brain;MSR", "aff_campus_unique_index": "1", "aff_campus_unique": ";Mountain View", "aff_country_unique_index": "0;0;0+0;0", "aff_country_unique": "United States" }, { "title": "ZipML: Training Linear Models with End-to-End Low Precision, and a Little Bit of Deep Learning", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/659", "id": "659", "author_site": "Hantian Zhang, Jerry Li, Kaan Kara, Dan Alistarh, Ji Liu, Ce Zhang", "author": "Hantian Zhang; Jerry Li; Kaan Kara; Dan Alistarh; Ji Liu; Ce Zhang", "abstract": "Recently there has been significant interest in training machine-learning models at low precision: by reducing precision, one can reduce computation and communication by one order of magnitude. 
We examine training at reduced precision, both from a theoretical and practical perspective, and ask: is it possible to train models at end-to-end low precision with provable guarantees? Can this lead to consistent order-of-magnitude speedups? We mainly focus on linear models, and the answer is yes for linear models. We develop a simple framework called ZipML based on one simple but novel strategy called double sampling. Our ZipML framework is able to execute training at low precision with no bias, guaranteeing convergence, whereas naive quantization would introduce significant bias. We validate our framework across a range of applications, and show that it enables an FPGA prototype that is up to $6.5\\times$ faster than an implementation using full 32-bit precision. We further develop a variance-optimal stochastic quantization strategy and show that it can make a significant difference in a variety of settings. When applied to linear models together with double sampling, we save up to another $1.7\\times$ in data movement compared with uniform quantization. When training deep networks with quantized models, we achieve higher accuracy than the state-of-the-art XNOR-Net.", "bibtex": "@InProceedings{pmlr-v70-zhang17e,\n title = \t {{Z}ip{ML}: Training Linear Models with End-to-End Low Precision, and a Little Bit of Deep Learning},\n author = {Hantian Zhang and Jerry Li and Kaan Kara and Dan Alistarh and Ji Liu and Ce Zhang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {4035--4043},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/zhang17e/zhang17e.pdf},\n url = \t {https://proceedings.mlr.press/v70/zhang17e.html},\n abstract = \t {Recently there has been significant interest in training machine-learning models at low precision: by reducing precision, one can reduce computation and communication by one order of magnitude. We examine training at reduced precision, both from a theoretical and practical perspective, and ask: is it possible to train models at end-to-end low precision with provable guarantees? Can this lead to consistent order-of-magnitude speedups? We mainly focus on linear models, and the answer is yes for linear models. We develop a simple framework called ZipML based on one simple but novel strategy called double sampling. Our ZipML framework is able to execute training at low precision with no bias, guaranteeing convergence, whereas naive quantization would introduce significant bias. We validate our framework across a range of applications, and show that it enables an FPGA prototype that is up to $6.5\\times$ faster than an implementation using full 32-bit precision. We further develop a variance-optimal stochastic quantization strategy and show that it can make a significant difference in a variety of settings. When applied to linear models together with double sampling, we save up to another $1.7\\times$ in data movement compared with uniform quantization. 
When training deep networks with quantized models, we achieve higher accuracy than the state-of-the-art XNOR-Net.}\n}", "pdf": "http://proceedings.mlr.press/v70/zhang17e/zhang17e.pdf", "supp": "", "pdf_size": 849345, "gs_citation": 227, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16735351654494070286&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 7, "aff": "ETH Zurich, Switzerland; Massachusetts Institute of Technology, USA; ETH Zurich, Switzerland; IST Austria, Austria; University of Rochester, USA; ETH Zurich, Switzerland", "aff_domain": "inf.ethz.ch; ; ;ist.ac.at; ;inf.ethz.ch", "email": "inf.ethz.ch; ; ;ist.ac.at; ;inf.ethz.ch", "github": "", "project": "", "author_num": 6, "oa": "https://proceedings.mlr.press/v70/zhang17e.html", "aff_unique_index": "0;1;0;2;3;0", "aff_unique_norm": "ETH Zurich;Massachusetts Institute of Technology;Institute of Science and Technology Austria;University of Rochester", "aff_unique_dep": ";;;", "aff_unique_url": "https://www.ethz.ch;https://web.mit.edu;https://www.ist.ac.at;https://www.rochester.edu", "aff_unique_abbr": "ETHZ;MIT;IST Austria;U of R", "aff_campus_unique_index": "", "aff_campus_unique": "", "aff_country_unique_index": "0;1;0;2;1;0", "aff_country_unique": "Switzerland;United States;Austria" }, { "title": "Zonotope Hit-and-run for Efficient Sampling from Projection DPPs", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/680", "id": "680", "author_site": "Guillaume Gautier, R\u00e9mi Bardenet, Michal Valko", "author": "Guillaume Gautier; R\u00e9mi Bardenet; Michal Valko", "abstract": "Determinantal point processes (DPPs) are distributions over sets of items that model diversity using kernels. Their applications in machine learning include summary extraction and recommendation systems. Yet, the cost of sampling from a DPP is prohibitive in large-scale applications, which has triggered an effort towards efficient approximate samplers. We build a novel MCMC sampler that combines ideas from combinatorial geometry, linear programming, and Monte Carlo methods to sample from DPPs with a fixed sample cardinality, also called projection DPPs. Our sampler leverages the ability of the hit-and-run MCMC kernel to efficiently move across convex bodies. Previous theoretical results yield a fast mixing time of our chain when targeting a distribution that is close to a projection DPP, but not a DPP in general. Our empirical results demonstrate that this extends to sampling projection DPPs, i.e., our sampler is more sample-efficient than previous approaches which in turn translates to faster convergence when dealing with costly-to-evaluate functions, such as summary extraction in our experiments.", "bibtex": "@InProceedings{pmlr-v70-gautier17a,\n title = \t {Zonotope Hit-and-run for Efficient Sampling from Projection {DPP}s},\n author = {Guillaume Gautier and R{\\'e}mi Bardenet and Michal Valko},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1223--1232},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/gautier17a/gautier17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/gautier17a.html},\n abstract = \t {Determinantal point processes (DPPs) are distributions over sets of items that model diversity using kernels. 
Their applications in machine learning include summary extraction and recommendation systems. Yet, the cost of sampling from a DPP is prohibitive in large-scale applications, which has triggered an effort towards efficient approximate samplers. We build a novel MCMC sampler that combines ideas from combinatorial geometry, linear programming, and Monte Carlo methods to sample from DPPs with a fixed sample cardinality, also called projection DPPs. Our sampler leverages the ability of the hit-and-run MCMC kernel to efficiently move across convex bodies. Previous theoretical results yield a fast mixing time of our chain when targeting a distribution that is close to a projection DPP, but not a DPP in general. Our empirical results demonstrate that this extends to sampling projection DPPs, i.e., our sampler is more sample-efficient than previous approaches which in turn translates to faster convergence when dealing with costly-to-evaluate functions, such as summary extraction in our experiments.}\n}", "pdf": "http://proceedings.mlr.press/v70/gautier17a/gautier17a.pdf", "supp": "", "pdf_size": 4468841, "gs_citation": 21, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16479278541282682112&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 14, "aff": "Univ. Lille, CNRS, Centrale Lille, UMR 9189 \u2014 CRIStAL; INRIA Lille \u2014 Nord Europe, SequeL team; INRIA Lille \u2014 Nord Europe, SequeL team", "aff_domain": "inria.fr; ; ", "email": "inria.fr; ; ", "github": "", "project": "", "author_num": 3, "oa": "https://proceedings.mlr.press/v70/gautier17a.html", "aff_unique_index": "0;1;1", "aff_unique_norm": "University of Lille;INRIA Lille \u2014 Nord Europe", "aff_unique_dep": "UMR 9189 \u2014 CRIStAL;SequeL team", "aff_unique_url": "https://www.univ-lille.fr;https://www.inria.fr/lille", "aff_unique_abbr": "Univ. Lille;INRIA", "aff_campus_unique_index": "1;1", "aff_campus_unique": ";Lille", "aff_country_unique_index": "0;0;0", "aff_country_unique": "France" }, { "title": "iSurvive: An Interpretable, Event-time Prediction Model for mHealth", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/732", "id": "732", "author_site": "Walter Dempsey, Alexander Moreno, James Rehg, Susan Murphy, Chris Scott, Michael Dennis, David Gustafson", "author": "Walter H. Dempsey; Alexander Moreno; Christy K. Scott; Michael L. Dennis; David H. Gustafson; Susan A. Murphy; James M. Rehg", "abstract": "An important mobile health (mHealth) task is the use of multimodal data, such as sensor streams and self-report, to construct interpretable time-to-event predictions of, for example, lapse to alcohol or illicit drug use. Interpretability of the prediction model is important for acceptance and adoption by domain scientists, enabling model outputs and parameters to inform theory and guide intervention design. Temporal latent state models are therefore attractive, and so we adopt the continuous time hidden Markov model (CT-HMM) due to its ability to describe irregular arrival times of event data. Standard CT-HMMs, however, are not specialized for predicting the time to a future event, the key variable for mHealth interventions. Also, standard emission models lack a sufficiently rich structure to describe multimodal data and incorporate domain knowledge. We present iSurvive, an extension of classical survival analysis to a CT-HMM. 
We present a parameter learning method for GLM emissions and survival model fitting, and present promising results on both synthetic data and an mHealth drug use dataset.", "bibtex": "@InProceedings{pmlr-v70-dempsey17a,\n title = \t {i{S}urvive: An Interpretable, Event-time Prediction Model for m{H}ealth},\n author = {Walter H. Dempsey and Alexander Moreno and Christy K. Scott and Michael L. Dennis and David H. Gustafson and Susan A. Murphy and James M. Rehg},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {970--979},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/dempsey17a/dempsey17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/dempsey17a.html},\n abstract = \t {An important mobile health (mHealth) task is the use of multimodal data, such as sensor streams and self-report, to construct interpretable time-to-event predictions of, for example, lapse to alcohol or illicit drug use. Interpretability of the prediction model is important for acceptance and adoption by domain scientists, enabling model outputs and parameters to inform theory and guide intervention design. Temporal latent state models are therefore attractive, and so we adopt the continuous time hidden Markov model (CT-HMM) due to its ability to describe irregular arrival times of event data. Standard CT-HMMs, however, are not specialized for predicting the time to a future event, the key variable for mHealth interventions. Also, standard emission models lack a sufficiently rich structure to describe multimodal data and incorporate domain knowledge. We present iSurvive, an extension of classical survival analysis to a CT-HMM. We present a parameter learning method for GLM emissions and survival model fitting, and present promising results on both synthetic data and an mHealth drug use dataset.}\n}", "pdf": "http://proceedings.mlr.press/v70/dempsey17a/dempsey17a.pdf", "supp": "", "pdf_size": 661353, "gs_citation": 30, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=342966309558162347&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 11, "aff": ";;;;;;", "aff_domain": ";;;;;;", "email": ";;;;;;", "github": "", "project": "", "author_num": 7, "oa": "https://proceedings.mlr.press/v70/dempsey17a.html" }, { "title": "meProp: Sparsified Back Propagation for Accelerated Deep Learning with Reduced Overfitting", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/567", "id": "567", "author_site": "Xu SUN, Xuancheng REN, Shuming Ma, Houfeng Wang", "author": "Xu Sun; Xuancheng Ren; Shuming Ma; Houfeng Wang", "abstract": "We propose a simple yet effective technique for neural network learning. The forward propagation is computed as usual. In back propagation, only a small subset of the full gradient is computed to update the model parameters. The gradient vectors are sparsified in such a way that only the top-$k$ elements (in terms of magnitude) are kept. As a result, only $k$ rows or columns (depending on the layout) of the weight matrix are modified, leading to a linear reduction ($k$ divided by the vector dimension) in the computational cost. Surprisingly, experimental results demonstrate that we can update only 1\u20134\\% of the weights at each back propagation pass. This does not result in a larger number of training iterations. 
More interestingly, the accuracy of the resulting models is actually improved rather than degraded, and a detailed analysis is given.", "bibtex": "@InProceedings{pmlr-v70-sun17c,\n title = \t {me{P}rop: Sparsified Back Propagation for Accelerated Deep Learning with Reduced Overfitting},\n author = {Xu Sun and Xuancheng Ren and Shuming Ma and Houfeng Wang},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {3299--3308},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/sun17c/sun17c.pdf},\n url = \t {https://proceedings.mlr.press/v70/sun17c.html},\n abstract = \t {We propose a simple yet effective technique for neural network learning. The forward propagation is computed as usual. In back propagation, only a small subset of the full gradient is computed to update the model parameters. The gradient vectors are sparsified in such a way that only the top-$k$ elements (in terms of magnitude) are kept. As a result, only $k$ rows or columns (depending on the layout) of the weight matrix are modified, leading to a linear reduction ($k$ divided by the vector dimension) in the computational cost. Surprisingly, experimental results demonstrate that we can update only 1\u20134\\% of the weights at each back propagation pass. This does not result in a larger number of training iterations. More interestingly, the accuracy of the resulting models is actually improved rather than degraded, and a detailed analysis is given.}\n}", "pdf": "http://proceedings.mlr.press/v70/sun17c/sun17c.pdf", "supp": "", "pdf_size": 397136, "gs_citation": 203, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8455055488696792496&as_sdt=2005&sciodt=0,5&hl=en", "gs_version_total": 6, "aff": "School of Electronics Engineering and Computer Science, Peking University, China+MOE Key Laboratory of Computational Linguistics, Peking University, China; School of Electronics Engineering and Computer Science, Peking University, China+MOE Key Laboratory of Computational Linguistics, Peking University, China; School of Electronics Engineering and Computer Science, Peking University, China+MOE Key Laboratory of Computational Linguistics, Peking University, China; School of Electronics Engineering and Computer Science, Peking University, China+MOE Key Laboratory of Computational Linguistics, Peking University, China", "aff_domain": "pku.edu.cn; ; ; ", "email": "pku.edu.cn; ; ; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/sun17c.html", "aff_unique_index": "0+0;0+0;0+0;0+0", "aff_unique_norm": "Peking University", "aff_unique_dep": "School of Electronics Engineering and Computer Science", "aff_unique_url": "http://www.pku.edu.cn", "aff_unique_abbr": "PKU", "aff_campus_unique_index": ";;;", "aff_campus_unique": "", "aff_country_unique_index": "0+0;0+0;0+0;0+0", "aff_country_unique": "China" }, { "title": "\u201cConvex Until Proven Guilty\u201d: Dimension-Free Acceleration of Gradient Descent on Non-Convex Functions", "status": "Poster", "track": "main", "site": "https://icml.cc/virtual/2017/poster/831", "id": "831", "author_site": "Yair Carmon, John Duchi, Oliver Hinder, Aaron Sidford", "author": "Yair Carmon; John C. 
Duchi; Oliver Hinder; Aaron Sidford", "abstract": "We develop and analyze a variant of Nesterov\u2019s accelerated gradient descent (AGD) for minimization of smooth non-convex functions. We prove that one of two cases occurs: either our AGD variant converges quickly, as if the function was convex, or we produce a certificate that the function is \u201cguilty\u201d of being non-convex. This non-convexity certificate allows us to exploit negative curvature and obtain deterministic, dimension-free acceleration of convergence for non-convex functions. For a function $f$ with Lipschitz continuous gradient and Hessian, we compute a point $x$ with $\\|\\nabla f(x)\\| \\le \\epsilon$ in $O(\\epsilon^{-7/4} \\log(1/ \\epsilon) )$ gradient and function evaluations. Assuming additionally that the third derivative is Lipschitz, we require only $O(\\epsilon^{-5/3} \\log(1/ \\epsilon) )$ evaluations.", "bibtex": "@InProceedings{pmlr-v70-carmon17a,\n title = \t {``{C}onvex Until Proven Guilty'': Dimension-Free Acceleration of Gradient Descent on Non-Convex Functions},\n author = {Yair Carmon and John C. Duchi and Oliver Hinder and Aaron Sidford},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {654--663},\n year = \t {2017},\n editor = \t {Precup, Doina and Teh, Yee Whye},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {06--11 Aug},\n publisher = {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/carmon17a/carmon17a.pdf},\n url = \t {https://proceedings.mlr.press/v70/carmon17a.html},\n abstract = \t {We develop and analyze a variant of Nesterov\u2019s accelerated gradient descent (AGD) for minimization of smooth non-convex functions. We prove that one of two cases occurs: either our AGD variant converges quickly, as if the function was convex, or we produce a certificate that the function is \u201cguilty\u201d of being non-convex. This non-convexity certificate allows us to exploit negative curvature and obtain deterministic, dimension-free acceleration of convergence for non-convex functions. For a function $f$ with Lipschitz continuous gradient and Hessian, we compute a point $x$ with $\\|\\nabla f(x)\\| \\le \\epsilon$ in $O(\\epsilon^{-7/4} \\log(1/ \\epsilon) )$ gradient and function evaluations. Assuming additionally that the third derivative is Lipschitz, we require only $O(\\epsilon^{-5/3} \\log(1/ \\epsilon) )$ evaluations.}\n}", "pdf": "http://proceedings.mlr.press/v70/carmon17a/carmon17a.pdf", "supp": "", "pdf_size": 939063, "gs_citation": 181, "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3885678190237232349&as_sdt=5,33&sciodt=0,33&hl=en", "gs_version_total": 5, "aff": "Stanford University; Stanford University; Stanford University; Stanford University", "aff_domain": "stanford.edu; ;stanford.edu; ", "email": "stanford.edu; ;stanford.edu; ", "github": "", "project": "", "author_num": 4, "oa": "https://proceedings.mlr.press/v70/carmon17a.html", "aff_unique_index": "0;0;0;0", "aff_unique_norm": "Stanford University", "aff_unique_dep": "", "aff_unique_url": "https://www.stanford.edu", "aff_unique_abbr": "Stanford", "aff_campus_unique_index": "0;0;0;0", "aff_campus_unique": "Stanford", "aff_country_unique_index": "0;0;0;0", "aff_country_unique": "United States" } ]